2024-12-07 12:19:43,294 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-12-07 12:19:43,307 main DEBUG Took 0.011078 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-07 12:19:43,307 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-07 12:19:43,307 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-07 12:19:43,308 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-07 12:19:43,309 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 12:19:43,316 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-07 12:19:43,327 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:19:43,328 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 12:19:43,329 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:19:43,329 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 12:19:43,330 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:19:43,330 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 12:19:43,331 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:19:43,331 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 12:19:43,331 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:19:43,332 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 12:19:43,332 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:19:43,333 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 12:19:43,333 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:19:43,333 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-07 12:19:43,334 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:19:43,334 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 12:19:43,334 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:19:43,335 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 12:19:43,335 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:19:43,335 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 12:19:43,336 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:19:43,336 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 12:19:43,336 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:19:43,337 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 12:19:43,337 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:19:43,337 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-07 12:19:43,339 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 12:19:43,340 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-07 12:19:43,342 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-07 12:19:43,343 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-07 12:19:43,344 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-07 12:19:43,344 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-07 12:19:43,352 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-07 12:19:43,355 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-07 12:19:43,356 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-07 12:19:43,357 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-07 12:19:43,357 main DEBUG createAppenders(={Console}) 2024-12-07 12:19:43,358 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-12-07 12:19:43,358 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-12-07 12:19:43,358 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-12-07 12:19:43,359 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-07 12:19:43,359 main DEBUG OutputStream closed 2024-12-07 12:19:43,359 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-07 12:19:43,360 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-07 12:19:43,360 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-12-07 12:19:43,425 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-07 12:19:43,427 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-07 12:19:43,428 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-07 12:19:43,428 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-07 12:19:43,429 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-07 12:19:43,429 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-07 12:19:43,430 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-07 12:19:43,430 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-07 12:19:43,430 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-07 12:19:43,430 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-07 12:19:43,431 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-07 12:19:43,431 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-07 12:19:43,431 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-07 12:19:43,431 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-07 12:19:43,432 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-07 12:19:43,432 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-07 12:19:43,432 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-07 12:19:43,433 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-07 12:19:43,435 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-07 12:19:43,435 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-12-07 12:19:43,436 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-07 12:19:43,436 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-12-07T12:19:43,706 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877 2024-12-07 12:19:43,709 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-07 12:19:43,710 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-07T12:19:43,723 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-12-07T12:19:43,759 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=287, ProcessCount=11, AvailableMemoryMB=6987 2024-12-07T12:19:43,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-07T12:19:43,782 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/cluster_7ab8ef0f-a838-5302-ca31-79cab7330b02, deleteOnExit=true 2024-12-07T12:19:43,783 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-07T12:19:43,784 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/test.cache.data in system properties and HBase conf 2024-12-07T12:19:43,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T12:19:43,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/hadoop.log.dir in system properties and HBase conf 2024-12-07T12:19:43,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T12:19:43,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T12:19:43,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-07T12:19:43,869 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-07T12:19:43,958 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-07T12:19:43,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T12:19:43,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T12:19:43,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T12:19:43,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T12:19:43,965 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T12:19:43,965 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T12:19:43,966 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T12:19:43,966 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T12:19:43,967 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T12:19:43,968 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/nfs.dump.dir in system properties and HBase conf 2024-12-07T12:19:43,968 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/java.io.tmpdir in system properties and HBase conf 2024-12-07T12:19:43,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T12:19:43,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T12:19:43,970 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T12:19:44,433 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-07T12:19:44,772 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-07T12:19:44,868 INFO [Time-limited test {}] log.Log(170): Logging initialized @2285ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-07T12:19:44,954 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:19:45,022 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:19:45,045 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:19:45,045 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:19:45,047 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T12:19:45,061 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:19:45,063 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:19:45,065 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:19:45,257 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/java.io.tmpdir/jetty-localhost-45249-hadoop-hdfs-3_4_1-tests_jar-_-any-15779411719477606483/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T12:19:45,267 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:45249} 2024-12-07T12:19:45,268 INFO [Time-limited test {}] server.Server(415): Started @2685ms 2024-12-07T12:19:45,297 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-07T12:19:45,735 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:19:45,745 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:19:45,747 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:19:45,747 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:19:45,747 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T12:19:45,749 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:19:45,749 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:19:45,903 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/java.io.tmpdir/jetty-localhost-40725-hadoop-hdfs-3_4_1-tests_jar-_-any-10268769564836300324/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:19:45,904 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:40725} 2024-12-07T12:19:45,905 INFO [Time-limited test {}] server.Server(415): Started @3322ms 2024-12-07T12:19:45,978 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T12:19:46,121 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:19:46,128 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:19:46,137 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:19:46,137 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:19:46,137 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T12:19:46,139 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:19:46,140 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:19:46,302 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/java.io.tmpdir/jetty-localhost-45395-hadoop-hdfs-3_4_1-tests_jar-_-any-13985601307845665007/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:19:46,304 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:45395} 2024-12-07T12:19:46,304 INFO [Time-limited test {}] server.Server(415): Started @3722ms 2024-12-07T12:19:46,308 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-07T12:19:46,634 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/cluster_7ab8ef0f-a838-5302-ca31-79cab7330b02/data/data2/current/BP-1537761316-172.17.0.2-1733573984532/current, will proceed with Du for space computation calculation, 2024-12-07T12:19:46,634 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/cluster_7ab8ef0f-a838-5302-ca31-79cab7330b02/data/data3/current/BP-1537761316-172.17.0.2-1733573984532/current, will proceed with Du for space computation calculation, 2024-12-07T12:19:46,634 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/cluster_7ab8ef0f-a838-5302-ca31-79cab7330b02/data/data1/current/BP-1537761316-172.17.0.2-1733573984532/current, will proceed with Du for space computation calculation, 2024-12-07T12:19:46,640 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/cluster_7ab8ef0f-a838-5302-ca31-79cab7330b02/data/data4/current/BP-1537761316-172.17.0.2-1733573984532/current, will proceed with Du for space computation calculation, 2024-12-07T12:19:46,710 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T12:19:46,716 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T12:19:46,797 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9cbfa15c0e7dd492 with lease ID 0x710cf6d1703348bf: Processing first storage report for DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1 from datanode DatanodeRegistration(127.0.0.1:37439, datanodeUuid=96a83e5d-1cfc-4e5c-8b35-1c75888d684a, infoPort=42021, infoSecurePort=0, ipcPort=37913, storageInfo=lv=-57;cid=testClusterID;nsid=1897191245;c=1733573984532) 2024-12-07T12:19:46,798 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9cbfa15c0e7dd492 with lease ID 0x710cf6d1703348bf: from storage DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1 node DatanodeRegistration(127.0.0.1:37439, datanodeUuid=96a83e5d-1cfc-4e5c-8b35-1c75888d684a, infoPort=42021, infoSecurePort=0, ipcPort=37913, storageInfo=lv=-57;cid=testClusterID;nsid=1897191245;c=1733573984532), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T12:19:46,799 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x458e91fa20b99434 with lease ID 0x710cf6d1703348be: Processing first storage report for DS-05fec530-141e-4a93-a5b6-d0700897592c from datanode DatanodeRegistration(127.0.0.1:42413, datanodeUuid=b19de99b-07ed-4ea1-8bcd-830e280a3f10, infoPort=45535, infoSecurePort=0, ipcPort=44615, storageInfo=lv=-57;cid=testClusterID;nsid=1897191245;c=1733573984532) 2024-12-07T12:19:46,799 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x458e91fa20b99434 with lease ID 0x710cf6d1703348be: from storage DS-05fec530-141e-4a93-a5b6-d0700897592c node DatanodeRegistration(127.0.0.1:42413, datanodeUuid=b19de99b-07ed-4ea1-8bcd-830e280a3f10, infoPort=45535, infoSecurePort=0, ipcPort=44615, storageInfo=lv=-57;cid=testClusterID;nsid=1897191245;c=1733573984532), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:19:46,800 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9cbfa15c0e7dd492 with lease ID 0x710cf6d1703348bf: Processing first storage report for DS-fe6da16e-b6ef-4aa9-b135-ebfb7ff6aee6 from datanode DatanodeRegistration(127.0.0.1:37439, datanodeUuid=96a83e5d-1cfc-4e5c-8b35-1c75888d684a, infoPort=42021, infoSecurePort=0, ipcPort=37913, storageInfo=lv=-57;cid=testClusterID;nsid=1897191245;c=1733573984532) 2024-12-07T12:19:46,800 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9cbfa15c0e7dd492 with lease ID 0x710cf6d1703348bf: from storage DS-fe6da16e-b6ef-4aa9-b135-ebfb7ff6aee6 node DatanodeRegistration(127.0.0.1:37439, datanodeUuid=96a83e5d-1cfc-4e5c-8b35-1c75888d684a, infoPort=42021, infoSecurePort=0, ipcPort=37913, storageInfo=lv=-57;cid=testClusterID;nsid=1897191245;c=1733573984532), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:19:46,804 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x458e91fa20b99434 with lease ID 0x710cf6d1703348be: Processing first storage report for DS-ae42e4e6-f764-4503-a0aa-62c83bafc0bf from datanode DatanodeRegistration(127.0.0.1:42413, datanodeUuid=b19de99b-07ed-4ea1-8bcd-830e280a3f10, infoPort=45535, infoSecurePort=0, ipcPort=44615, storageInfo=lv=-57;cid=testClusterID;nsid=1897191245;c=1733573984532) 2024-12-07T12:19:46,805 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x458e91fa20b99434 with lease ID 0x710cf6d1703348be: from storage DS-ae42e4e6-f764-4503-a0aa-62c83bafc0bf node DatanodeRegistration(127.0.0.1:42413, datanodeUuid=b19de99b-07ed-4ea1-8bcd-830e280a3f10, infoPort=45535, infoSecurePort=0, ipcPort=44615, storageInfo=lv=-57;cid=testClusterID;nsid=1897191245;c=1733573984532), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:19:46,851 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877 2024-12-07T12:19:46,951 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/cluster_7ab8ef0f-a838-5302-ca31-79cab7330b02/zookeeper_0, clientPort=60900, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/cluster_7ab8ef0f-a838-5302-ca31-79cab7330b02/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/cluster_7ab8ef0f-a838-5302-ca31-79cab7330b02/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-07T12:19:46,962 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60900 2024-12-07T12:19:46,972 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:19:46,975 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:19:47,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741825_1001 (size=7) 2024-12-07T12:19:47,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741825_1001 (size=7) 2024-12-07T12:19:47,692 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e with version=8 2024-12-07T12:19:47,693 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/hbase-staging 2024-12-07T12:19:47,792 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-07T12:19:48,050 INFO [Time-limited test {}] client.ConnectionUtils(128): master/27c6fcd7dac8:0 server-side Connection retries=45 2024-12-07T12:19:48,061 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:19:48,062 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T12:19:48,066 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T12:19:48,066 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:19:48,066 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T12:19:48,219 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-07T12:19:48,280 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-07T12:19:48,288 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-07T12:19:48,292 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T12:19:48,319 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 28870 (auto-detected) 2024-12-07T12:19:48,320 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-07T12:19:48,339 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40913 2024-12-07T12:19:48,360 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40913 connecting to ZooKeeper ensemble=127.0.0.1:60900 2024-12-07T12:19:48,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:409130x0, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T12:19:48,397 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40913-0x1018cddb4fe0000 connected 2024-12-07T12:19:48,423 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:19:48,426 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:19:48,439 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:19:48,444 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e, hbase.cluster.distributed=false 2024-12-07T12:19:48,474 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T12:19:48,478 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40913 2024-12-07T12:19:48,478 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40913 2024-12-07T12:19:48,480 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40913 2024-12-07T12:19:48,480 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40913 2024-12-07T12:19:48,483 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40913 2024-12-07T12:19:48,595 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/27c6fcd7dac8:0 server-side Connection retries=45 2024-12-07T12:19:48,597 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:19:48,598 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T12:19:48,598 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T12:19:48,598 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:19:48,598 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T12:19:48,601 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T12:19:48,603 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T12:19:48,604 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32959 2024-12-07T12:19:48,605 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:32959 connecting to ZooKeeper ensemble=127.0.0.1:60900 2024-12-07T12:19:48,607 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:19:48,611 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:19:48,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:329590x0, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T12:19:48,619 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:329590x0, quorum=127.0.0.1:60900, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:19:48,619 DEBUG 
[zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:32959-0x1018cddb4fe0001 connected 2024-12-07T12:19:48,625 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T12:19:48,633 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T12:19:48,636 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32959-0x1018cddb4fe0001, quorum=127.0.0.1:60900, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T12:19:48,641 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32959-0x1018cddb4fe0001, quorum=127.0.0.1:60900, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T12:19:48,642 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32959 2024-12-07T12:19:48,643 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32959 2024-12-07T12:19:48,643 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32959 2024-12-07T12:19:48,645 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32959 2024-12-07T12:19:48,645 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32959 2024-12-07T12:19:48,666 DEBUG [M:0;27c6fcd7dac8:40913 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;27c6fcd7dac8:40913 2024-12-07T12:19:48,667 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/27c6fcd7dac8,40913,1733573987844 2024-12-07T12:19:48,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32959-0x1018cddb4fe0001, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:19:48,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:19:48,677 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/27c6fcd7dac8,40913,1733573987844 2024-12-07T12:19:48,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:19:48,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32959-0x1018cddb4fe0001, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T12:19:48,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32959-0x1018cddb4fe0001, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-07T12:19:48,704 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T12:19:48,705 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/27c6fcd7dac8,40913,1733573987844 from backup master directory 2024-12-07T12:19:48,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/27c6fcd7dac8,40913,1733573987844 2024-12-07T12:19:48,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32959-0x1018cddb4fe0001, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:19:48,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:19:48,711 WARN [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T12:19:48,711 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=27c6fcd7dac8,40913,1733573987844 2024-12-07T12:19:48,713 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-07T12:19:48,715 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-07T12:19:48,774 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/hbase.id] with ID: 291837e2-5b03-4683-84ce-a5d81bfde1c7 2024-12-07T12:19:48,774 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/.tmp/hbase.id 2024-12-07T12:19:48,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741826_1002 (size=42) 2024-12-07T12:19:48,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741826_1002 (size=42) 2024-12-07T12:19:48,788 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/.tmp/hbase.id]:[hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/hbase.id] 2024-12-07T12:19:48,830 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:19:48,836 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] 
util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-07T12:19:48,856 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 2024-12-07T12:19:48,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32959-0x1018cddb4fe0001, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:19:48,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:19:48,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741827_1003 (size=196) 2024-12-07T12:19:48,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741827_1003 (size=196) 2024-12-07T12:19:48,895 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T12:19:48,897 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T12:19:48,903 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T12:19:48,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741828_1004 (size=1189) 2024-12-07T12:19:48,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741828_1004 (size=1189) 2024-12-07T12:19:48,957 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY 
=> ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/data/master/store 2024-12-07T12:19:48,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741829_1005 (size=34) 2024-12-07T12:19:48,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741829_1005 (size=34) 2024-12-07T12:19:48,982 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-07T12:19:48,985 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:19:48,987 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T12:19:48,987 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:19:48,987 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:19:48,989 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T12:19:48,989 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:19:48,989 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-07T12:19:48,990 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733573988986Disabling compacts and flushes for region at 1733573988986Disabling writes for close at 1733573988989 (+3 ms)Writing region close event to WAL at 1733573988989Closed at 1733573988989 2024-12-07T12:19:48,992 WARN [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/data/master/store/.initializing 2024-12-07T12:19:48,992 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/WALs/27c6fcd7dac8,40913,1733573987844 2024-12-07T12:19:49,013 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=27c6fcd7dac8%2C40913%2C1733573987844, suffix=, logDir=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/WALs/27c6fcd7dac8,40913,1733573987844, archiveDir=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/oldWALs, maxLogs=10 2024-12-07T12:19:49,027 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C40913%2C1733573987844.1733573989020 2024-12-07T12:19:49,050 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/WALs/27c6fcd7dac8,40913,1733573987844/27c6fcd7dac8%2C40913%2C1733573987844.1733573989020 2024-12-07T12:19:49,059 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45535:45535),(127.0.0.1/127.0.0.1:42021:42021)] 2024-12-07T12:19:49,060 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:19:49,061 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:19:49,064 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:19:49,065 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:19:49,101 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:19:49,129 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T12:19:49,134 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:19:49,137 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:19:49,138 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:19:49,142 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T12:19:49,142 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:19:49,143 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:19:49,143 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:19:49,146 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T12:19:49,146 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:19:49,147 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:19:49,147 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:19:49,150 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T12:19:49,150 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:19:49,151 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:19:49,151 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:19:49,154 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:19:49,156 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:19:49,161 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:19:49,161 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:19:49,164 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T12:19:49,168 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:19:49,172 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:19:49,173 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=868328, jitterRate=0.10413676500320435}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T12:19:49,179 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733573989077Initializing all the Stores at 1733573989079 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733573989079Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733573989080 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733573989080Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733573989081 (+1 ms)Cleaning up temporary data from old regions at 1733573989161 (+80 ms)Region opened successfully at 1733573989179 (+18 ms) 2024-12-07T12:19:49,181 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-07T12:19:49,215 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45f17e06, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=27c6fcd7dac8/172.17.0.2:0 2024-12-07T12:19:49,246 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-07T12:19:49,258 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T12:19:49,258 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T12:19:49,261 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T12:19:49,262 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-07T12:19:49,267 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-07T12:19:49,267 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T12:19:49,298 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-07T12:19:49,309 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T12:19:49,311 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-07T12:19:49,314 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T12:19:49,316 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T12:19:49,317 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-07T12:19:49,319 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T12:19:49,323 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T12:19:49,325 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-07T12:19:49,326 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T12:19:49,328 
DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T12:19:49,344 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T12:19:49,345 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T12:19:49,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T12:19:49,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32959-0x1018cddb4fe0001, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T12:19:49,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:19:49,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32959-0x1018cddb4fe0001, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:19:49,351 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=27c6fcd7dac8,40913,1733573987844, sessionid=0x1018cddb4fe0000, setting cluster-up flag (Was=false) 2024-12-07T12:19:49,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:19:49,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32959-0x1018cddb4fe0001, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:19:49,371 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T12:19:49,373 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=27c6fcd7dac8,40913,1733573987844 2024-12-07T12:19:49,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:19:49,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32959-0x1018cddb4fe0001, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:19:49,384 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T12:19:49,386 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=27c6fcd7dac8,40913,1733573987844 2024-12-07T12:19:49,391 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-07T12:19:49,452 INFO [RS:0;27c6fcd7dac8:32959 {}] regionserver.HRegionServer(746): ClusterId : 291837e2-5b03-4683-84ce-a5d81bfde1c7 2024-12-07T12:19:49,454 DEBUG [RS:0;27c6fcd7dac8:32959 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T12:19:49,459 DEBUG [RS:0;27c6fcd7dac8:32959 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T12:19:49,460 DEBUG [RS:0;27c6fcd7dac8:32959 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T12:19:49,464 DEBUG [RS:0;27c6fcd7dac8:32959 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T12:19:49,464 DEBUG [RS:0;27c6fcd7dac8:32959 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ebe18c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=27c6fcd7dac8/172.17.0.2:0 2024-12-07T12:19:49,466 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-07T12:19:49,478 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-07T12:19:49,486 DEBUG [RS:0;27c6fcd7dac8:32959 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;27c6fcd7dac8:32959 2024-12-07T12:19:49,487 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-07T12:19:49,490 INFO [RS:0;27c6fcd7dac8:32959 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T12:19:49,490 INFO [RS:0;27c6fcd7dac8:32959 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T12:19:49,491 DEBUG [RS:0;27c6fcd7dac8:32959 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-07T12:19:49,494 INFO [RS:0;27c6fcd7dac8:32959 {}] regionserver.HRegionServer(2659): reportForDuty to master=27c6fcd7dac8,40913,1733573987844 with port=32959, startcode=1733573988556 2024-12-07T12:19:49,495 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 27c6fcd7dac8,40913,1733573987844 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T12:19:49,503 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:19:49,503 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:19:49,504 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:19:49,504 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:19:49,504 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/27c6fcd7dac8:0, corePoolSize=10, maxPoolSize=10 2024-12-07T12:19:49,504 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:19:49,504 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=2, maxPoolSize=2 2024-12-07T12:19:49,505 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:19:49,508 DEBUG [RS:0;27c6fcd7dac8:32959 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T12:19:49,509 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733574019509 2024-12-07T12:19:49,511 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-07T12:19:49,511 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T12:19:49,511 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-07T12:19:49,512 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T12:19:49,515 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T12:19:49,515 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T12:19:49,516 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T12:19:49,516 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T12:19:49,517 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:19:49,517 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T12:19:49,518 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-07T12:19:49,521 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T12:19:49,522 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T12:19:49,522 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T12:19:49,524 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T12:19:49,525 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T12:19:49,527 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.large.0-1733573989526,5,FailOnTimeoutGroup] 2024-12-07T12:19:49,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741831_1007 (size=1321) 2024-12-07T12:19:49,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741831_1007 (size=1321) 2024-12-07T12:19:49,532 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.small.0-1733573989527,5,FailOnTimeoutGroup] 2024-12-07T12:19:49,532 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T12:19:49,533 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
2024-12-07T12:19:49,533 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-07T12:19:49,533 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e 2024-12-07T12:19:49,534 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-07T12:19:49,534 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-07T12:19:49,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741832_1008 (size=32) 2024-12-07T12:19:49,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741832_1008 (size=32) 2024-12-07T12:19:49,552 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:19:49,557 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T12:19:49,559 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T12:19:49,559 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:19:49,560 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:19:49,561 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T12:19:49,563 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T12:19:49,564 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:19:49,565 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:19:49,565 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T12:19:49,568 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T12:19:49,568 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:19:49,569 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:19:49,569 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T12:19:49,573 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T12:19:49,573 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:19:49,574 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:19:49,574 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T12:19:49,576 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/hbase/meta/1588230740 2024-12-07T12:19:49,576 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/hbase/meta/1588230740 2024-12-07T12:19:49,579 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T12:19:49,579 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T12:19:49,581 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T12:19:49,583 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T12:19:49,587 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:19:49,588 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=826391, jitterRate=0.05081050097942352}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T12:19:49,590 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733573989552Initializing all the Stores at 1733573989554 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733573989554Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733573989556 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733573989556Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733573989556Cleaning up temporary data from old regions at 1733573989579 (+23 ms)Region opened successfully at 1733573989590 (+11 ms) 2024-12-07T12:19:49,590 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T12:19:49,590 INFO [PEWorker-1 {}] regionserver.HRegion(1755): 
Closing region hbase:meta,,1.1588230740 2024-12-07T12:19:49,591 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T12:19:49,591 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T12:19:49,591 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T12:19:49,595 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T12:19:49,595 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733573989590Disabling compacts and flushes for region at 1733573989590Disabling writes for close at 1733573989591 (+1 ms)Writing region close event to WAL at 1733573989594 (+3 ms)Closed at 1733573989595 (+1 ms) 2024-12-07T12:19:49,597 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39415, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T12:19:49,598 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T12:19:49,599 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-07T12:19:49,603 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40913 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 27c6fcd7dac8,32959,1733573988556 2024-12-07T12:19:49,605 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40913 {}] master.ServerManager(517): Registering regionserver=27c6fcd7dac8,32959,1733573988556 2024-12-07T12:19:49,606 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T12:19:49,617 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T12:19:49,620 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T12:19:49,621 DEBUG [RS:0;27c6fcd7dac8:32959 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e 2024-12-07T12:19:49,621 DEBUG [RS:0;27c6fcd7dac8:32959 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35545 2024-12-07T12:19:49,621 DEBUG [RS:0;27c6fcd7dac8:32959 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T12:19:49,626 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T12:19:49,627 DEBUG [RS:0;27c6fcd7dac8:32959 {}] zookeeper.ZKUtil(111): regionserver:32959-0x1018cddb4fe0001, 
quorum=127.0.0.1:60900, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/27c6fcd7dac8,32959,1733573988556 2024-12-07T12:19:49,627 WARN [RS:0;27c6fcd7dac8:32959 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T12:19:49,627 INFO [RS:0;27c6fcd7dac8:32959 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T12:19:49,627 DEBUG [RS:0;27c6fcd7dac8:32959 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556 2024-12-07T12:19:49,629 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [27c6fcd7dac8,32959,1733573988556] 2024-12-07T12:19:49,654 INFO [RS:0;27c6fcd7dac8:32959 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T12:19:49,668 INFO [RS:0;27c6fcd7dac8:32959 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T12:19:49,673 INFO [RS:0;27c6fcd7dac8:32959 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T12:19:49,673 INFO [RS:0;27c6fcd7dac8:32959 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:19:49,674 INFO [RS:0;27c6fcd7dac8:32959 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T12:19:49,680 INFO [RS:0;27c6fcd7dac8:32959 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T12:19:49,681 INFO [RS:0;27c6fcd7dac8:32959 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-07T12:19:49,682 DEBUG [RS:0;27c6fcd7dac8:32959 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:19:49,682 DEBUG [RS:0;27c6fcd7dac8:32959 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:19:49,682 DEBUG [RS:0;27c6fcd7dac8:32959 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:19:49,682 DEBUG [RS:0;27c6fcd7dac8:32959 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:19:49,683 DEBUG [RS:0;27c6fcd7dac8:32959 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:19:49,683 DEBUG [RS:0;27c6fcd7dac8:32959 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/27c6fcd7dac8:0, corePoolSize=2, maxPoolSize=2 2024-12-07T12:19:49,683 DEBUG [RS:0;27c6fcd7dac8:32959 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:19:49,683 DEBUG [RS:0;27c6fcd7dac8:32959 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:19:49,684 DEBUG [RS:0;27c6fcd7dac8:32959 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:19:49,684 DEBUG [RS:0;27c6fcd7dac8:32959 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:19:49,684 DEBUG [RS:0;27c6fcd7dac8:32959 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:19:49,684 DEBUG [RS:0;27c6fcd7dac8:32959 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:19:49,684 DEBUG [RS:0;27c6fcd7dac8:32959 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/27c6fcd7dac8:0, corePoolSize=3, maxPoolSize=3 2024-12-07T12:19:49,685 DEBUG [RS:0;27c6fcd7dac8:32959 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0, corePoolSize=3, maxPoolSize=3 2024-12-07T12:19:49,686 INFO [RS:0;27c6fcd7dac8:32959 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T12:19:49,686 INFO [RS:0;27c6fcd7dac8:32959 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T12:19:49,687 INFO [RS:0;27c6fcd7dac8:32959 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:19:49,687 INFO [RS:0;27c6fcd7dac8:32959 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-07T12:19:49,687 INFO [RS:0;27c6fcd7dac8:32959 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T12:19:49,687 INFO [RS:0;27c6fcd7dac8:32959 {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,32959,1733573988556-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T12:19:49,706 INFO [RS:0;27c6fcd7dac8:32959 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T12:19:49,708 INFO [RS:0;27c6fcd7dac8:32959 {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,32959,1733573988556-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:19:49,709 INFO [RS:0;27c6fcd7dac8:32959 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:19:49,709 INFO [RS:0;27c6fcd7dac8:32959 {}] regionserver.Replication(171): 27c6fcd7dac8,32959,1733573988556 started 2024-12-07T12:19:49,727 INFO [RS:0;27c6fcd7dac8:32959 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:19:49,728 INFO [RS:0;27c6fcd7dac8:32959 {}] regionserver.HRegionServer(1482): Serving as 27c6fcd7dac8,32959,1733573988556, RpcServer on 27c6fcd7dac8/172.17.0.2:32959, sessionid=0x1018cddb4fe0001 2024-12-07T12:19:49,728 DEBUG [RS:0;27c6fcd7dac8:32959 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T12:19:49,729 DEBUG [RS:0;27c6fcd7dac8:32959 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 27c6fcd7dac8,32959,1733573988556 2024-12-07T12:19:49,729 DEBUG [RS:0;27c6fcd7dac8:32959 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '27c6fcd7dac8,32959,1733573988556' 2024-12-07T12:19:49,729 DEBUG [RS:0;27c6fcd7dac8:32959 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T12:19:49,730 DEBUG [RS:0;27c6fcd7dac8:32959 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T12:19:49,731 DEBUG [RS:0;27c6fcd7dac8:32959 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T12:19:49,731 DEBUG [RS:0;27c6fcd7dac8:32959 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T12:19:49,731 DEBUG [RS:0;27c6fcd7dac8:32959 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 27c6fcd7dac8,32959,1733573988556 2024-12-07T12:19:49,731 DEBUG [RS:0;27c6fcd7dac8:32959 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '27c6fcd7dac8,32959,1733573988556' 2024-12-07T12:19:49,731 DEBUG [RS:0;27c6fcd7dac8:32959 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T12:19:49,732 DEBUG [RS:0;27c6fcd7dac8:32959 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T12:19:49,732 DEBUG [RS:0;27c6fcd7dac8:32959 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T12:19:49,733 INFO [RS:0;27c6fcd7dac8:32959 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T12:19:49,733 INFO [RS:0;27c6fcd7dac8:32959 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-12-07T12:19:49,771 WARN [27c6fcd7dac8:40913 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-07T12:19:49,841 INFO [RS:0;27c6fcd7dac8:32959 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=27c6fcd7dac8%2C32959%2C1733573988556, suffix=, logDir=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556, archiveDir=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/oldWALs, maxLogs=32 2024-12-07T12:19:49,844 INFO [RS:0;27c6fcd7dac8:32959 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C32959%2C1733573988556.1733573989844 2024-12-07T12:19:49,853 INFO [RS:0;27c6fcd7dac8:32959 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733573989844 2024-12-07T12:19:49,856 DEBUG [RS:0;27c6fcd7dac8:32959 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45535:45535),(127.0.0.1/127.0.0.1:42021:42021)] 2024-12-07T12:19:50,024 DEBUG [27c6fcd7dac8:40913 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-07T12:19:50,036 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=27c6fcd7dac8,32959,1733573988556 2024-12-07T12:19:50,043 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 27c6fcd7dac8,32959,1733573988556, state=OPENING 2024-12-07T12:19:50,048 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T12:19:50,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:19:50,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32959-0x1018cddb4fe0001, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:19:50,050 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:19:50,050 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:19:50,051 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T12:19:50,053 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=27c6fcd7dac8,32959,1733573988556}] 2024-12-07T12:19:50,228 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T12:19:50,232 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40333, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T12:19:50,246 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-07T12:19:50,247 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T12:19:50,252 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=27c6fcd7dac8%2C32959%2C1733573988556.meta, suffix=.meta, logDir=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556, archiveDir=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/oldWALs, maxLogs=32 2024-12-07T12:19:50,254 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C32959%2C1733573988556.meta.1733573990254.meta 2024-12-07T12:19:50,263 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.meta.1733573990254.meta 2024-12-07T12:19:50,264 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45535:45535),(127.0.0.1/127.0.0.1:42021:42021)] 2024-12-07T12:19:50,267 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:19:50,269 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T12:19:50,272 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T12:19:50,277 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-07T12:19:50,281 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T12:19:50,282 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:19:50,282 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-07T12:19:50,282 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-07T12:19:50,285 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T12:19:50,287 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T12:19:50,287 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:19:50,288 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:19:50,289 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T12:19:50,290 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T12:19:50,291 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:19:50,291 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:19:50,292 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T12:19:50,293 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T12:19:50,293 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:19:50,294 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:19:50,294 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T12:19:50,296 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T12:19:50,296 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:19:50,297 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-07T12:19:50,297 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T12:19:50,298 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/hbase/meta/1588230740 2024-12-07T12:19:50,301 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/hbase/meta/1588230740 2024-12-07T12:19:50,303 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T12:19:50,303 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T12:19:50,304 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T12:19:50,306 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T12:19:50,308 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=794577, jitterRate=0.010356977581977844}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T12:19:50,308 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-07T12:19:50,311 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733573990283Writing region info on filesystem at 1733573990283Initializing all the Stores at 1733573990285 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733573990285Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733573990285Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733573990285Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733573990285Cleaning up temporary data from old regions at 1733573990303 (+18 ms)Running coprocessor post-open hooks at 1733573990309 (+6 ms)Region opened successfully at 1733573990310 (+1 ms) 2024-12-07T12:19:50,320 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733573990219 2024-12-07T12:19:50,334 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T12:19:50,335 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-07T12:19:50,337 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=27c6fcd7dac8,32959,1733573988556 2024-12-07T12:19:50,339 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 27c6fcd7dac8,32959,1733573988556, state=OPEN 2024-12-07T12:19:50,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32959-0x1018cddb4fe0001, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T12:19:50,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T12:19:50,345 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:19:50,345 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:19:50,345 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=27c6fcd7dac8,32959,1733573988556 2024-12-07T12:19:50,350 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-07T12:19:50,351 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=27c6fcd7dac8,32959,1733573988556 in 292 msec 2024-12-07T12:19:50,357 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-07T12:19:50,358 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 747 msec 2024-12-07T12:19:50,359 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T12:19:50,359 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-07T12:19:50,382 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T12:19:50,383 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=27c6fcd7dac8,32959,1733573988556, seqNum=-1] 2024-12-07T12:19:50,406 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T12:19:50,408 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43975, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T12:19:50,429 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0070 sec 2024-12-07T12:19:50,429 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733573990429, completionTime=-1 2024-12-07T12:19:50,432 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-07T12:19:50,432 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-07T12:19:50,460 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-07T12:19:50,460 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733574050460 2024-12-07T12:19:50,460 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733574110460 2024-12-07T12:19:50,460 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 28 msec 2024-12-07T12:19:50,463 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,40913,1733573987844-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:19:50,463 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,40913,1733573987844-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:19:50,463 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,40913,1733573987844-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:19:50,465 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-27c6fcd7dac8:40913, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T12:19:50,465 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T12:19:50,466 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-07T12:19:50,473 DEBUG [master/27c6fcd7dac8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-07T12:19:50,493 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.782sec 2024-12-07T12:19:50,494 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T12:19:50,495 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T12:19:50,496 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T12:19:50,497 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-07T12:19:50,497 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T12:19:50,498 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,40913,1733573987844-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T12:19:50,499 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,40913,1733573987844-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T12:19:50,507 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-07T12:19:50,508 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T12:19:50,508 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,40913,1733573987844-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T12:19:50,567 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61c64a9a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T12:19:50,570 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-07T12:19:50,570 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-07T12:19:50,574 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 27c6fcd7dac8,40913,-1 for getting cluster id 2024-12-07T12:19:50,578 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-07T12:19:50,587 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '291837e2-5b03-4683-84ce-a5d81bfde1c7' 2024-12-07T12:19:50,589 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-07T12:19:50,590 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "291837e2-5b03-4683-84ce-a5d81bfde1c7" 2024-12-07T12:19:50,592 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d7423fe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T12:19:50,592 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [27c6fcd7dac8,40913,-1] 2024-12-07T12:19:50,595 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-07T12:19:50,596 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:19:50,598 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43228, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-07T12:19:50,601 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@231106e3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T12:19:50,602 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T12:19:50,609 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=27c6fcd7dac8,32959,1733573988556, seqNum=-1] 2024-12-07T12:19:50,610 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T12:19:50,613 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50680, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T12:19:50,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=27c6fcd7dac8,40913,1733573987844 2024-12-07T12:19:50,635 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:19:50,644 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-07T12:19:50,648 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-07T12:19:50,654 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 27c6fcd7dac8,40913,1733573987844 2024-12-07T12:19:50,657 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@516150a3 2024-12-07T12:19:50,658 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T12:19:50,661 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60178, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T12:19:50,663 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40913 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-07T12:19:50,663 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40913 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
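[Editor's note, not part of the captured log] The two TableDescriptorChecker warnings above show that the table about to be created uses a deliberately tiny region max file size (786432 bytes) and memstore flush size (8192 bytes), so that flushes and WAL rolls happen quickly during the test. The driving test source is not included in this log; the following is only a minimal, hypothetical sketch of how such a table could be created with the standard HBase client API, reusing the values reported in the warnings.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateSlowSyncTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Values taken from the TableDescriptorChecker warnings in the log above;
      // they are intentionally far below production defaults.
      TableDescriptorBuilder td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .setMaxFileSize(786432)        // MAX_FILESIZE from the warning
          .setMemStoreFlushSize(8192);   // MEMSTORE_FLUSHSIZE from the warning
      admin.createTable(td.build());
    }
  }
}

The warnings are advisory only; the master proceeds to store the CreateTableProcedure (pid=4) in the entries that follow.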
2024-12-07T12:19:50,667 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40913 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T12:19:50,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40913 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-12-07T12:19:50,677 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T12:19:50,679 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40913 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-12-07T12:19:50,680 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:19:50,683 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T12:19:50,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40913 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T12:19:50,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741835_1011 (size=389) 2024-12-07T12:19:50,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741835_1011 (size=389) 2024-12-07T12:19:50,729 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 297b751a5a49e3f4948d5969018b2211, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e 2024-12-07T12:19:50,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741836_1012 (size=72) 2024-12-07T12:19:50,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741836_1012 (size=72) 2024-12-07T12:19:50,740 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:19:50,740 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 297b751a5a49e3f4948d5969018b2211, disabling compactions & flushes 2024-12-07T12:19:50,740 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211. 2024-12-07T12:19:50,740 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211. 2024-12-07T12:19:50,740 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211. after waiting 0 ms 2024-12-07T12:19:50,740 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211. 2024-12-07T12:19:50,740 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211. 2024-12-07T12:19:50,740 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 297b751a5a49e3f4948d5969018b2211: Waiting for close lock at 1733573990740Disabling compacts and flushes for region at 1733573990740Disabling writes for close at 1733573990740Writing region close event to WAL at 1733573990740Closed at 1733573990740 2024-12-07T12:19:50,743 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T12:19:50,748 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1733573990743"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733573990743"}]},"ts":"1733573990743"} 2024-12-07T12:19:50,754 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-07T12:19:50,756 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T12:19:50,760 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733573990756"}]},"ts":"1733573990756"} 2024-12-07T12:19:50,765 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-12-07T12:19:50,767 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=297b751a5a49e3f4948d5969018b2211, ASSIGN}] 2024-12-07T12:19:50,770 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=297b751a5a49e3f4948d5969018b2211, ASSIGN 2024-12-07T12:19:50,772 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=297b751a5a49e3f4948d5969018b2211, ASSIGN; state=OFFLINE, location=27c6fcd7dac8,32959,1733573988556; forceNewPlan=false, retain=false 2024-12-07T12:19:50,923 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=297b751a5a49e3f4948d5969018b2211, regionState=OPENING, regionLocation=27c6fcd7dac8,32959,1733573988556 2024-12-07T12:19:50,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=297b751a5a49e3f4948d5969018b2211, ASSIGN because future has completed 2024-12-07T12:19:50,929 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 297b751a5a49e3f4948d5969018b2211, server=27c6fcd7dac8,32959,1733573988556}] 2024-12-07T12:19:51,091 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211. 
2024-12-07T12:19:51,091 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 297b751a5a49e3f4948d5969018b2211, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:19:51,091 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 297b751a5a49e3f4948d5969018b2211 2024-12-07T12:19:51,092 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:19:51,092 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 297b751a5a49e3f4948d5969018b2211 2024-12-07T12:19:51,092 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 297b751a5a49e3f4948d5969018b2211 2024-12-07T12:19:51,094 INFO [StoreOpener-297b751a5a49e3f4948d5969018b2211-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 297b751a5a49e3f4948d5969018b2211 2024-12-07T12:19:51,096 INFO [StoreOpener-297b751a5a49e3f4948d5969018b2211-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 297b751a5a49e3f4948d5969018b2211 columnFamilyName info 2024-12-07T12:19:51,096 DEBUG [StoreOpener-297b751a5a49e3f4948d5969018b2211-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:19:51,097 INFO [StoreOpener-297b751a5a49e3f4948d5969018b2211-1 {}] regionserver.HStore(327): Store=297b751a5a49e3f4948d5969018b2211/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:19:51,098 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 297b751a5a49e3f4948d5969018b2211 2024-12-07T12:19:51,099 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211 2024-12-07T12:19:51,099 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211 2024-12-07T12:19:51,100 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 297b751a5a49e3f4948d5969018b2211 2024-12-07T12:19:51,100 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 297b751a5a49e3f4948d5969018b2211 2024-12-07T12:19:51,103 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 297b751a5a49e3f4948d5969018b2211 2024-12-07T12:19:51,106 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:19:51,107 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 297b751a5a49e3f4948d5969018b2211; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=833042, jitterRate=0.059268683195114136}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T12:19:51,107 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 297b751a5a49e3f4948d5969018b2211 2024-12-07T12:19:51,109 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 297b751a5a49e3f4948d5969018b2211: Running coprocessor pre-open hook at 1733573991092Writing region info on filesystem at 1733573991092Initializing all the Stores at 1733573991094 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733573991094Cleaning up temporary data from old regions at 1733573991100 (+6 ms)Running coprocessor post-open hooks at 1733573991107 (+7 ms)Region opened successfully at 1733573991108 (+1 ms) 2024-12-07T12:19:51,111 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211., pid=6, masterSystemTime=1733573991084 2024-12-07T12:19:51,115 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211. 2024-12-07T12:19:51,115 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211. 2024-12-07T12:19:51,117 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=297b751a5a49e3f4948d5969018b2211, regionState=OPEN, openSeqNum=2, regionLocation=27c6fcd7dac8,32959,1733573988556 2024-12-07T12:19:51,121 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 297b751a5a49e3f4948d5969018b2211, server=27c6fcd7dac8,32959,1733573988556 because future has completed 2024-12-07T12:19:51,127 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-07T12:19:51,128 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 297b751a5a49e3f4948d5969018b2211, server=27c6fcd7dac8,32959,1733573988556 in 194 msec 2024-12-07T12:19:51,133 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-07T12:19:51,133 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=297b751a5a49e3f4948d5969018b2211, ASSIGN in 361 msec 2024-12-07T12:19:51,134 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T12:19:51,135 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733573991134"}]},"ts":"1733573991134"} 2024-12-07T12:19:51,139 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-12-07T12:19:51,140 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T12:19:51,144 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 471 msec 2024-12-07T12:19:55,785 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-07T12:19:55,867 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-07T12:19:55,869 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-12-07T12:19:58,277 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-07T12:19:58,278 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-07T12:19:58,280 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-07T12:19:58,280 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-07T12:19:58,281 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T12:19:58,282 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-07T12:19:58,282 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-07T12:19:58,282 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-07T12:20:00,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40913 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T12:20:00,768 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-12-07T12:20:00,771 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-12-07T12:20:00,777 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-12-07T12:20:00,778 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211. 
2024-12-07T12:20:00,778 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C32959%2C1733573988556.1733574000778 2024-12-07T12:20:00,789 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:20:00,789 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:20:00,789 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:20:00,789 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:20:00,789 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:20:00,790 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733573989844 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733574000778 2024-12-07T12:20:00,791 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45535:45535),(127.0.0.1/127.0.0.1:42021:42021)] 2024-12-07T12:20:00,791 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733573989844 is not closed yet, will try archiving it next time 2024-12-07T12:20:00,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741833_1009 (size=451) 2024-12-07T12:20:00,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741833_1009 (size=451) 2024-12-07T12:20:00,796 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733573989844 to hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/oldWALs/27c6fcd7dac8%2C32959%2C1733573988556.1733573989844 2024-12-07T12:20:00,800 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211., hostname=27c6fcd7dac8,32959,1733573988556, seqNum=2] 2024-12-07T12:20:12,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32959 {}] regionserver.HRegion(8855): Flush requested on 297b751a5a49e3f4948d5969018b2211 2024-12-07T12:20:12,841 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 297b751a5a49e3f4948d5969018b2211 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-07T12:20:12,927 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/.tmp/info/5590d8d9dd6b40e99b8f558c23edae06 is 1080, key is row0001/info:/1733574000802/Put/seqid=0 2024-12-07T12:20:12,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741838_1014 (size=12509) 2024-12-07T12:20:12,952 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741838_1014 (size=12509) 2024-12-07T12:20:12,953 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/.tmp/info/5590d8d9dd6b40e99b8f558c23edae06 2024-12-07T12:20:13,011 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/.tmp/info/5590d8d9dd6b40e99b8f558c23edae06 as hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/5590d8d9dd6b40e99b8f558c23edae06 2024-12-07T12:20:13,027 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/5590d8d9dd6b40e99b8f558c23edae06, entries=7, sequenceid=11, filesize=12.2 K 2024-12-07T12:20:13,036 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 297b751a5a49e3f4948d5969018b2211 in 194ms, sequenceid=11, compaction requested=false 2024-12-07T12:20:13,037 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 297b751a5a49e3f4948d5969018b2211: 2024-12-07T12:20:16,847 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
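The episode above is one complete memstore flush: a flush is requested on region 297b751a5a49e3f4948d5969018b2211 once enough data has accumulated, the store data is written out under a .tmp directory, and the resulting file is then committed into the info store by a rename before the flush is reported finished. The standalone Java sketch below models only that threshold-then-tmp-then-commit pattern; the class name, the ~7 KB threshold, and the local paths are assumptions for illustration and are not the HBase flush code.

// Illustrative only: models the flush pattern visible in the log above
// (buffer edits, flush when a size threshold is crossed, write to .tmp,
// commit by rename). Names, paths, and the threshold are assumptions.
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public final class FlushSketch {
    private final long flushThresholdBytes;
    private final StringBuilder memstore = new StringBuilder();
    private long dataSizeBytes = 0;

    FlushSketch(long flushThresholdBytes) {
        this.flushThresholdBytes = flushThresholdBytes;
    }

    /** Buffer an edit; report whether a flush should be requested. */
    boolean put(String row, String value) {
        memstore.append(row).append('=').append(value).append('\n');
        dataSizeBytes += row.length() + value.length();
        return dataSizeBytes >= flushThresholdBytes;   // "Flush requested on <region>"
    }

    /** Flush: write to a .tmp file first, then commit it by renaming into the store. */
    void flush(Path storeDir) throws IOException {
        Path tmp = storeDir.resolve(".tmp").resolve("flush-" + System.nanoTime());
        Files.createDirectories(tmp.getParent());
        Files.write(tmp, memstore.toString().getBytes(StandardCharsets.UTF_8));
        Path committed = storeDir.resolve("info").resolve(tmp.getFileName());
        Files.createDirectories(committed.getParent());
        Files.move(tmp, committed);                     // "Committing ... as ..."
        memstore.setLength(0);
        dataSizeBytes = 0;
    }

    public static void main(String[] args) throws IOException {
        FlushSketch store = new FlushSketch(7 * 1024);  // tiny threshold, like this test's flushes
        for (int i = 1; i <= 7; i++) {
            if (store.put(String.format("row%04d", i), "v".repeat(1024))) {
                store.flush(Paths.get("build", "flush-sketch"));
            }
        }
    }
}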
2024-12-07T12:20:20,850 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C32959%2C1733573988556.1733574020850 2024-12-07T12:20:21,060 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 207 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK], DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK]] 2024-12-07T12:20:21,060 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:20:21,060 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:20:21,061 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:20:21,061 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:20:21,061 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:20:21,061 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733574000778 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733574020850 2024-12-07T12:20:21,063 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42021:42021),(127.0.0.1/127.0.0.1:45535:45535)] 2024-12-07T12:20:21,063 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733574000778 is not closed yet, will try archiving it next time 2024-12-07T12:20:21,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741837_1013 (size=12399) 2024-12-07T12:20:21,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741837_1013 (size=12399) 2024-12-07T12:20:21,268 INFO [FSHLog-0-hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e-prefix:27c6fcd7dac8,32959,1733573988556 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK], DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK]] 2024-12-07T12:20:23,472 INFO [FSHLog-0-hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e-prefix:27c6fcd7dac8,32959,1733573988556 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK], DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK]] 2024-12-07T12:20:25,676 INFO [FSHLog-0-hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e-prefix:27c6fcd7dac8,32959,1733573988556 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK], DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK]] 2024-12-07T12:20:27,880 INFO [FSHLog-0-hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e-prefix:27c6fcd7dac8,32959,1733573988556 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK], DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK]] 2024-12-07T12:20:27,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32959 {}] regionserver.HRegion(8855): Flush requested on 297b751a5a49e3f4948d5969018b2211 2024-12-07T12:20:27,881 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 297b751a5a49e3f4948d5969018b2211 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-07T12:20:28,083 INFO [FSHLog-0-hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e-prefix:27c6fcd7dac8,32959,1733573988556 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK], DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK]] 2024-12-07T12:20:28,089 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/.tmp/info/2813b088355f432db02c8d0391df72c5 is 1080, key is row0008/info:/1733574014839/Put/seqid=0 2024-12-07T12:20:28,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741840_1016 (size=12509) 2024-12-07T12:20:28,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741840_1016 (size=12509) 2024-12-07T12:20:28,097 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/.tmp/info/2813b088355f432db02c8d0391df72c5 2024-12-07T12:20:28,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/.tmp/info/2813b088355f432db02c8d0391df72c5 as hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/2813b088355f432db02c8d0391df72c5 2024-12-07T12:20:28,115 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/2813b088355f432db02c8d0391df72c5, entries=7, sequenceid=21, filesize=12.2 K 2024-12-07T12:20:28,317 INFO [FSHLog-0-hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e-prefix:27c6fcd7dac8,32959,1733573988556 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK], DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK]] 2024-12-07T12:20:28,317 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 297b751a5a49e3f4948d5969018b2211 in 
436ms, sequenceid=21, compaction requested=false 2024-12-07T12:20:28,317 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 297b751a5a49e3f4948d5969018b2211: 2024-12-07T12:20:28,318 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-12-07T12:20:28,318 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:20:28,319 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/5590d8d9dd6b40e99b8f558c23edae06 because midkey is the same as first or last row 2024-12-07T12:20:30,084 INFO [FSHLog-0-hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e-prefix:27c6fcd7dac8,32959,1733573988556 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK], DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK]] 2024-12-07T12:20:30,517 INFO [master/27c6fcd7dac8:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-07T12:20:30,517 INFO [master/27c6fcd7dac8:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-07T12:20:32,288 INFO [FSHLog-0-hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e-prefix:27c6fcd7dac8,32959,1733573988556 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK], DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK]] 2024-12-07T12:20:32,290 WARN [FSHLog-0-hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e-prefix:27c6fcd7dac8,32959,1733573988556 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK], DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK]] 2024-12-07T12:20:32,291 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 27c6fcd7dac8%2C32959%2C1733573988556:(num 1733574020850) roll requested 2024-12-07T12:20:32,292 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C32959%2C1733573988556.1733574032292 2024-12-07T12:20:32,500 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK], DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK]] 2024-12-07T12:20:32,501 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:20:32,501 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:20:32,501 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:20:32,501 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:20:32,501 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
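The roll request above was triggered by the count of slow syncs ("count=8, threshold=5"); later entries show the other trigger, a single sync exceeding a hard time limit ("time=5017 ms, threshold=5000 ms"). A minimal standalone Java sketch of that pair of checks follows; the two thresholds are copied from the log lines, while the class, method names, and sliding-window bookkeeping are assumptions, not the HBase WAL implementation.

// Illustrative only: two roll triggers as seen in the log — too many slow
// syncs within a window, or one sync slower than a hard limit.
import java.util.ArrayDeque;
import java.util.Deque;

public final class SlowSyncRollSketch {
    private static final long SLOW_SYNC_MS = 100;          // assumed: a sync slower than this counts as "slow"
    private static final long ROLL_ON_SYNC_MS = 5_000;     // "time=5017 ms, threshold=5000 ms"
    private static final int  SLOW_SYNC_COUNT_LIMIT = 5;   // "count=8, threshold=5"
    private static final long CHECK_WINDOW_MS = 60_000;    // assumed sliding window

    private final Deque<Long> recentSlowSyncTimestamps = new ArrayDeque<>();

    /** Record one WAL sync and report whether a log roll should be requested. */
    boolean onSyncCompleted(long nowMs, long syncCostMs) {
        if (syncCostMs >= ROLL_ON_SYNC_MS) {
            return true;                                    // one very slow sync forces a roll
        }
        if (syncCostMs >= SLOW_SYNC_MS) {
            recentSlowSyncTimestamps.addLast(nowMs);
        }
        // Drop slow syncs that fall outside the sliding window.
        while (!recentSlowSyncTimestamps.isEmpty()
                && nowMs - recentSlowSyncTimestamps.peekFirst() > CHECK_WINDOW_MS) {
            recentSlowSyncTimestamps.removeFirst();
        }
        return recentSlowSyncTimestamps.size() > SLOW_SYNC_COUNT_LIMIT;
    }

    public static void main(String[] args) {
        SlowSyncRollSketch wal = new SlowSyncRollSketch();
        long t = 0;
        for (int i = 0; i < 8; i++) {                       // eight ~200 ms syncs, as in the log
            boolean roll = wal.onSyncCompleted(t += 2_200, 201);
            System.out.printf("sync %d slow, roll requested=%b%n", i + 1, roll);
        }
    }
}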
2024-12-07T12:20:32,502 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733574020850 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733574032292 2024-12-07T12:20:32,503 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42021:42021),(127.0.0.1/127.0.0.1:45535:45535)] 2024-12-07T12:20:32,503 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733574020850 is not closed yet, will try archiving it next time 2024-12-07T12:20:32,503 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733574000778 to hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/oldWALs/27c6fcd7dac8%2C32959%2C1733573988556.1733574000778 2024-12-07T12:20:32,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741839_1015 (size=7739) 2024-12-07T12:20:32,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741839_1015 (size=7739) 2024-12-07T12:20:34,492 INFO [FSHLog-0-hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e-prefix:27c6fcd7dac8,32959,1733573988556 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK], DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK]] 2024-12-07T12:20:36,092 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 297b751a5a49e3f4948d5969018b2211, had cached 0 bytes from a total of 25018 2024-12-07T12:20:36,696 INFO [FSHLog-0-hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e-prefix:27c6fcd7dac8,32959,1733573988556 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK], DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK]] 2024-12-07T12:20:38,900 INFO [FSHLog-0-hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e-prefix:27c6fcd7dac8,32959,1733573988556 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK], DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK]] 2024-12-07T12:20:41,105 INFO [FSHLog-0-hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e-prefix:27c6fcd7dac8,32959,1733573988556 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK], 
DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK]] 2024-12-07T12:20:43,107 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T12:20:43,108 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C32959%2C1733573988556.1733574043107 2024-12-07T12:20:46,848 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T12:20:48,128 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5017 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK], DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK]] 2024-12-07T12:20:48,131 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5017 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK], DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK]] 2024-12-07T12:20:48,131 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 27c6fcd7dac8%2C32959%2C1733573988556:(num 1733574043107) roll requested 2024-12-07T12:20:48,134 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:20:48,134 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:20:48,134 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:20:48,135 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:20:48,135 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:20:48,135 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733574032292 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733574043107 2024-12-07T12:20:48,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741841_1017 (size=4753) 2024-12-07T12:20:48,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741841_1017 (size=4753) 2024-12-07T12:20:48,159 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42021:42021),(127.0.0.1/127.0.0.1:45535:45535)] 2024-12-07T12:20:48,160 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C32959%2C1733573988556.1733574048159 2024-12-07T12:20:53,163 INFO [FSHLog-0-hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e-prefix:27c6fcd7dac8,32959,1733573988556 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK], DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK]] 2024-12-07T12:20:53,163 WARN 
[FSHLog-0-hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e-prefix:27c6fcd7dac8,32959,1733573988556 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK], DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK]] 2024-12-07T12:20:53,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32959 {}] regionserver.HRegion(8855): Flush requested on 297b751a5a49e3f4948d5969018b2211 2024-12-07T12:20:53,163 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 297b751a5a49e3f4948d5969018b2211 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-07T12:20:53,174 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK], DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK]] 2024-12-07T12:20:53,174 WARN [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK], DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK]] 2024-12-07T12:20:55,164 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T12:20:58,165 INFO [FSHLog-0-hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e-prefix:27c6fcd7dac8,32959,1733573988556 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK], DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK]] 2024-12-07T12:20:58,165 WARN [FSHLog-0-hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e-prefix:27c6fcd7dac8,32959,1733573988556 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK], DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK]] 2024-12-07T12:20:58,166 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:20:58,166 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:20:58,166 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:20:58,166 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:20:58,166 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:20:58,167 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733574043107 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733574048159 2024-12-07T12:20:58,167 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:45535:45535),(127.0.0.1/127.0.0.1:42021:42021)] 2024-12-07T12:20:58,167 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733574043107 is not closed yet, will try archiving it next time 2024-12-07T12:20:58,168 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 27c6fcd7dac8%2C32959%2C1733573988556:(num 1733574048159) roll requested 2024-12-07T12:20:58,168 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C32959%2C1733573988556.1733574058168 2024-12-07T12:20:58,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741842_1018 (size=1569) 2024-12-07T12:20:58,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741842_1018 (size=1569) 2024-12-07T12:20:58,172 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/.tmp/info/2c1086fe476444d182dbc1b468625826 is 1080, key is row0015/info:/1733574029883/Put/seqid=0 2024-12-07T12:20:58,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741844_1020 (size=12509) 2024-12-07T12:20:58,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741844_1020 (size=12509) 2024-12-07T12:20:58,187 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/.tmp/info/2c1086fe476444d182dbc1b468625826 2024-12-07T12:20:58,197 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/.tmp/info/2c1086fe476444d182dbc1b468625826 as hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/2c1086fe476444d182dbc1b468625826 2024-12-07T12:20:58,205 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/2c1086fe476444d182dbc1b468625826, entries=7, sequenceid=31, filesize=12.2 K 2024-12-07T12:21:03,175 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK], DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK]] 2024-12-07T12:21:03,175 WARN [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; 
time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK], DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK]] 2024-12-07T12:21:03,207 INFO [FSHLog-0-hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e-prefix:27c6fcd7dac8,32959,1733573988556 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK], DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK]] 2024-12-07T12:21:03,207 WARN [FSHLog-0-hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e-prefix:27c6fcd7dac8,32959,1733573988556 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42413,DS-05fec530-141e-4a93-a5b6-d0700897592c,DISK], DatanodeInfoWithStorage[127.0.0.1:37439,DS-ed33608a-17d0-47fd-a9f0-b9e4074418d1,DISK]] 2024-12-07T12:21:03,207 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 297b751a5a49e3f4948d5969018b2211 in 10044ms, sequenceid=31, compaction requested=true 2024-12-07T12:21:03,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 297b751a5a49e3f4948d5969018b2211: 2024-12-07T12:21:03,207 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:03,207 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:03,207 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-12-07T12:21:03,207 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:21:03,207 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:03,207 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/5590d8d9dd6b40e99b8f558c23edae06 because midkey is the same as first or last row 2024-12-07T12:21:03,208 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:03,208 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:03,208 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733574048159 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733574058168 2024-12-07T12:21:03,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 297b751a5a49e3f4948d5969018b2211:info, priority=-2147483648, current under compaction store size is 1 2024-12-07T12:21:03,210 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42021:42021),(127.0.0.1/127.0.0.1:45535:45535)] 2024-12-07T12:21:03,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:37439 is added to blk_1073741843_1019 (size=438) 2024-12-07T12:21:03,210 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733574048159 is not closed yet, will try archiving it next time 2024-12-07T12:21:03,211 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 27c6fcd7dac8%2C32959%2C1733573988556:(num 1733574058168) roll requested 2024-12-07T12:21:03,211 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733574020850 to hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/oldWALs/27c6fcd7dac8%2C32959%2C1733573988556.1733574020850 2024-12-07T12:21:03,211 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C32959%2C1733573988556.1733574063211 2024-12-07T12:21:03,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741843_1019 (size=438) 2024-12-07T12:21:03,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T12:21:03,212 DEBUG [RS:0;27c6fcd7dac8:32959-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T12:21:03,213 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733574032292 to hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/oldWALs/27c6fcd7dac8%2C32959%2C1733573988556.1733574032292 2024-12-07T12:21:03,214 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733574043107 to hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/oldWALs/27c6fcd7dac8%2C32959%2C1733573988556.1733574043107 2024-12-07T12:21:03,215 DEBUG [RS:0;27c6fcd7dac8:32959-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T12:21:03,216 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733574048159 to hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/oldWALs/27c6fcd7dac8%2C32959%2C1733573988556.1733574048159 2024-12-07T12:21:03,217 DEBUG [RS:0;27c6fcd7dac8:32959-shortCompactions-0 {}] regionserver.HStore(1541): 297b751a5a49e3f4948d5969018b2211/info is initiating minor compaction (all files) 2024-12-07T12:21:03,218 INFO [RS:0;27c6fcd7dac8:32959-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 297b751a5a49e3f4948d5969018b2211/info in 
TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211. 2024-12-07T12:21:03,218 INFO [RS:0;27c6fcd7dac8:32959-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/5590d8d9dd6b40e99b8f558c23edae06, hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/2813b088355f432db02c8d0391df72c5, hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/2c1086fe476444d182dbc1b468625826] into tmpdir=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/.tmp, totalSize=36.6 K 2024-12-07T12:21:03,219 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:03,219 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:03,219 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:03,219 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:03,219 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:03,219 DEBUG [RS:0;27c6fcd7dac8:32959-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5590d8d9dd6b40e99b8f558c23edae06, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733574000802 2024-12-07T12:21:03,219 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733574058168 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733574063211 2024-12-07T12:21:03,220 DEBUG [RS:0;27c6fcd7dac8:32959-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2813b088355f432db02c8d0391df72c5, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1733574014839 2024-12-07T12:21:03,221 DEBUG [RS:0;27c6fcd7dac8:32959-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2c1086fe476444d182dbc1b468625826, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733574029883 2024-12-07T12:21:03,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741845_1021 (size=93) 2024-12-07T12:21:03,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741845_1021 (size=93) 2024-12-07T12:21:03,223 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733574058168 to hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/oldWALs/27c6fcd7dac8%2C32959%2C1733573988556.1733574058168 2024-12-07T12:21:03,231 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:42021:42021),(127.0.0.1/127.0.0.1:45535:45535)] 2024-12-07T12:21:03,232 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C32959%2C1733573988556.1733574063231 2024-12-07T12:21:03,242 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:03,243 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:03,243 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:03,244 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:03,244 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:03,244 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733574063211 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/WALs/27c6fcd7dac8,32959,1733573988556/27c6fcd7dac8%2C32959%2C1733573988556.1733574063231 2024-12-07T12:21:03,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741846_1022 (size=1258) 2024-12-07T12:21:03,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741846_1022 (size=1258) 2024-12-07T12:21:03,255 INFO [RS:0;27c6fcd7dac8:32959-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 297b751a5a49e3f4948d5969018b2211#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T12:21:03,257 DEBUG [RS:0;27c6fcd7dac8:32959-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/.tmp/info/c66db849b9d84a3aa7e4c6269cb8339e is 1080, key is row0001/info:/1733574000802/Put/seqid=0 2024-12-07T12:21:03,258 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45535:45535),(127.0.0.1/127.0.0.1:42021:42021)] 2024-12-07T12:21:03,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741848_1024 (size=27710) 2024-12-07T12:21:03,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741848_1024 (size=27710) 2024-12-07T12:21:03,276 DEBUG [RS:0;27c6fcd7dac8:32959-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/.tmp/info/c66db849b9d84a3aa7e4c6269cb8339e as hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/c66db849b9d84a3aa7e4c6269cb8339e 2024-12-07T12:21:03,292 INFO [RS:0;27c6fcd7dac8:32959-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 297b751a5a49e3f4948d5969018b2211/info of 297b751a5a49e3f4948d5969018b2211 into c66db849b9d84a3aa7e4c6269cb8339e(size=27.1 
K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T12:21:03,293 DEBUG [RS:0;27c6fcd7dac8:32959-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 297b751a5a49e3f4948d5969018b2211: 2024-12-07T12:21:03,294 INFO [RS:0;27c6fcd7dac8:32959-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211., storeName=297b751a5a49e3f4948d5969018b2211/info, priority=13, startTime=1733574063209; duration=0sec 2024-12-07T12:21:03,294 DEBUG [RS:0;27c6fcd7dac8:32959-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-07T12:21:03,294 DEBUG [RS:0;27c6fcd7dac8:32959-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:21:03,294 DEBUG [RS:0;27c6fcd7dac8:32959-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/c66db849b9d84a3aa7e4c6269cb8339e because midkey is the same as first or last row 2024-12-07T12:21:03,295 DEBUG [RS:0;27c6fcd7dac8:32959-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-07T12:21:03,295 DEBUG [RS:0;27c6fcd7dac8:32959-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:21:03,295 DEBUG [RS:0;27c6fcd7dac8:32959-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/c66db849b9d84a3aa7e4c6269cb8339e because midkey is the same as first or last row 2024-12-07T12:21:03,295 DEBUG [RS:0;27c6fcd7dac8:32959-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-07T12:21:03,295 DEBUG [RS:0;27c6fcd7dac8:32959-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:21:03,295 DEBUG [RS:0;27c6fcd7dac8:32959-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/c66db849b9d84a3aa7e4c6269cb8339e because midkey is the same as first or last row 2024-12-07T12:21:03,295 DEBUG [RS:0;27c6fcd7dac8:32959-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T12:21:03,295 DEBUG [RS:0;27c6fcd7dac8:32959-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 297b751a5a49e3f4948d5969018b2211:info 2024-12-07T12:21:15,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32959 {}] regionserver.HRegion(8855): Flush requested on 297b751a5a49e3f4948d5969018b2211 2024-12-07T12:21:15,256 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 297b751a5a49e3f4948d5969018b2211 1/1 
column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-07T12:21:15,263 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/.tmp/info/03a00bb7899e4f9b8a5b5a280d1233ed is 1080, key is row0022/info:/1733574063233/Put/seqid=0 2024-12-07T12:21:15,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741849_1025 (size=12509) 2024-12-07T12:21:15,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741849_1025 (size=12509) 2024-12-07T12:21:15,271 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/.tmp/info/03a00bb7899e4f9b8a5b5a280d1233ed 2024-12-07T12:21:15,281 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/.tmp/info/03a00bb7899e4f9b8a5b5a280d1233ed as hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/03a00bb7899e4f9b8a5b5a280d1233ed 2024-12-07T12:21:15,289 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/03a00bb7899e4f9b8a5b5a280d1233ed, entries=7, sequenceid=42, filesize=12.2 K 2024-12-07T12:21:15,291 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 297b751a5a49e3f4948d5969018b2211 in 35ms, sequenceid=42, compaction requested=false 2024-12-07T12:21:15,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 297b751a5a49e3f4948d5969018b2211: 2024-12-07T12:21:15,292 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-12-07T12:21:15,292 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:21:15,292 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/c66db849b9d84a3aa7e4c6269cb8339e because midkey is the same as first or last row 2024-12-07T12:21:16,848 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
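Each flush above ends with the same split check: the region is judged big enough (for example sumSize=39.3 K against sizeToCheck=16.0 K with regionsWithCommonTable=1), but the split is refused because the midkey of the candidate store file equals its first or last row. The standalone Java sketch below models that two-step decision; the size-to-check growth rule and all names are assumptions for illustration, not the exact HBase split-policy classes.

// Illustrative only: "big enough to split" size check followed by a veto
// when the chosen split point (midkey) is degenerate.
import java.util.List;

public final class SplitCheckSketch {

    record StoreFile(long sizeBytes, String firstRow, String midRow, String lastRow) {}

    /** Grow the size limit with the number of regions of the table on this server, up to a cap. */
    static long sizeToCheck(long flushSizeBytes, long maxFileSizeBytes, int regionsWithCommonTable) {
        long n = regionsWithCommonTable;
        long increasing = 2 * flushSizeBytes * n * n * n;   // assumed growth rule for the sketch
        return Math.min(maxFileSizeBytes, increasing);
    }

    static boolean shouldSplit(List<StoreFile> files, long flushSize, long maxFileSize, int regions) {
        long sumSize = files.stream().mapToLong(StoreFile::sizeBytes).sum();
        if (sumSize <= sizeToCheck(flushSize, maxFileSize, regions)) {
            return false;                                   // not big enough yet
        }
        // Pick a split point from the largest file; refuse to split on a degenerate midkey.
        StoreFile largest = files.stream()
                .max((a, b) -> Long.compare(a.sizeBytes(), b.sizeBytes()))
                .orElseThrow();
        boolean degenerateMidkey = largest.midRow().equals(largest.firstRow())
                || largest.midRow().equals(largest.lastRow());
        return !degenerateMidkey;                           // "cannot split ... midkey is the same ..."
    }

    public static void main(String[] args) {
        // Numbers roughly mirror the log: ~39 K of store files vs. a 16 K size-to-check, one region.
        List<StoreFile> files = List.of(
                new StoreFile(27_700, "row0001", "row0001", "row0021"),
                new StoreFile(12_500, "row0022", "row0025", "row0028"));
        System.out.println(shouldSplit(files, 8_000, 16_000, 1));   // prints false: midkey degenerate
    }
}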
2024-12-07T12:21:21,092 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 297b751a5a49e3f4948d5969018b2211, had cached 0 bytes from a total of 40219 2024-12-07T12:21:23,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-07T12:21:23,267 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-07T12:21:23,267 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 
2024-12-07T12:21:23,272 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:21:23,273 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:21:23,273 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-07T12:21:23,273 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-07T12:21:23,273 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1053966274, stopped=false 2024-12-07T12:21:23,273 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=27c6fcd7dac8,40913,1733573987844 2024-12-07T12:21:23,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T12:21:23,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32959-0x1018cddb4fe0001, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T12:21:23,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:23,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32959-0x1018cddb4fe0001, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:23,275 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T12:21:23,276 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-07T12:21:23,276 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T12:21:23,276 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:21:23,276 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:21:23,276 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:32959-0x1018cddb4fe0001, quorum=127.0.0.1:60900, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:21:23,276 INFO [RS:0;27c6fcd7dac8:32959 {}] regionserver.HRegionServer(878): Closing user regions 2024-12-07T12:21:23,276 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '27c6fcd7dac8,32959,1733573988556' ***** 2024-12-07T12:21:23,277 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T12:21:23,277 INFO [RS:0;27c6fcd7dac8:32959 {}] regionserver.HRegionServer(3091): Received CLOSE for 297b751a5a49e3f4948d5969018b2211 2024-12-07T12:21:23,278 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 297b751a5a49e3f4948d5969018b2211, disabling compactions & flushes 2024-12-07T12:21:23,278 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211. 2024-12-07T12:21:23,278 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211. 2024-12-07T12:21:23,278 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211. after waiting 0 ms 2024-12-07T12:21:23,278 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211. 2024-12-07T12:21:23,278 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 297b751a5a49e3f4948d5969018b2211 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-12-07T12:21:23,279 INFO [RS:0;27c6fcd7dac8:32959 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T12:21:23,279 INFO [RS:0;27c6fcd7dac8:32959 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T12:21:23,279 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T12:21:23,279 INFO [RS:0;27c6fcd7dac8:32959 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T12:21:23,280 INFO [RS:0;27c6fcd7dac8:32959 {}] regionserver.HRegionServer(959): stopping server 27c6fcd7dac8,32959,1733573988556 2024-12-07T12:21:23,280 INFO [RS:0;27c6fcd7dac8:32959 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T12:21:23,280 INFO [RS:0;27c6fcd7dac8:32959 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;27c6fcd7dac8:32959. 
2024-12-07T12:21:23,280 DEBUG [RS:0;27c6fcd7dac8:32959 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T12:21:23,280 DEBUG [RS:0;27c6fcd7dac8:32959 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:21:23,280 INFO [RS:0;27c6fcd7dac8:32959 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T12:21:23,280 INFO [RS:0;27c6fcd7dac8:32959 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T12:21:23,280 INFO [RS:0;27c6fcd7dac8:32959 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-07T12:21:23,280 INFO [RS:0;27c6fcd7dac8:32959 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-07T12:21:23,281 INFO [RS:0;27c6fcd7dac8:32959 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-07T12:21:23,281 DEBUG [RS:0;27c6fcd7dac8:32959 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 297b751a5a49e3f4948d5969018b2211=TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211.} 2024-12-07T12:21:23,281 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T12:21:23,281 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T12:21:23,281 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T12:21:23,281 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T12:21:23,281 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T12:21:23,281 DEBUG [RS:0;27c6fcd7dac8:32959 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 297b751a5a49e3f4948d5969018b2211 2024-12-07T12:21:23,281 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-12-07T12:21:23,284 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/.tmp/info/cfcf557067444b88abde6029542aa997 is 1080, key is row0029/info:/1733574077258/Put/seqid=0 2024-12-07T12:21:23,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741850_1026 (size=8193) 2024-12-07T12:21:23,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741850_1026 (size=8193) 2024-12-07T12:21:23,293 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/.tmp/info/cfcf557067444b88abde6029542aa997 2024-12-07T12:21:23,301 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/.tmp/info/cfcf557067444b88abde6029542aa997 as 
hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/cfcf557067444b88abde6029542aa997 2024-12-07T12:21:23,303 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/hbase/meta/1588230740/.tmp/info/643782639353478eb529496cab96452e is 195, key is TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211./info:regioninfo/1733573991116/Put/seqid=0 2024-12-07T12:21:23,309 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/cfcf557067444b88abde6029542aa997, entries=3, sequenceid=48, filesize=8.0 K 2024-12-07T12:21:23,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741851_1027 (size=7016) 2024-12-07T12:21:23,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741851_1027 (size=7016) 2024-12-07T12:21:23,310 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/hbase/meta/1588230740/.tmp/info/643782639353478eb529496cab96452e 2024-12-07T12:21:23,310 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 297b751a5a49e3f4948d5969018b2211 in 32ms, sequenceid=48, compaction requested=true 2024-12-07T12:21:23,311 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/5590d8d9dd6b40e99b8f558c23edae06, hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/2813b088355f432db02c8d0391df72c5, hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/2c1086fe476444d182dbc1b468625826] to archive 2024-12-07T12:21:23,314 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-07T12:21:23,317 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/5590d8d9dd6b40e99b8f558c23edae06 to hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/archive/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/5590d8d9dd6b40e99b8f558c23edae06 2024-12-07T12:21:23,319 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/2813b088355f432db02c8d0391df72c5 to hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/archive/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/2813b088355f432db02c8d0391df72c5 2024-12-07T12:21:23,321 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/2c1086fe476444d182dbc1b468625826 to hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/archive/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/info/2c1086fe476444d182dbc1b468625826 2024-12-07T12:21:23,334 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/hbase/meta/1588230740/.tmp/ns/fb471cc8bbd042a4b013b61a23a827c3 is 43, key is default/ns:d/1733573990412/Put/seqid=0 2024-12-07T12:21:23,333 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=27c6fcd7dac8:40913 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-12-07T12:21:23,338 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [5590d8d9dd6b40e99b8f558c23edae06=12509, 2813b088355f432db02c8d0391df72c5=12509, 2c1086fe476444d182dbc1b468625826=12509] 2024-12-07T12:21:23,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741852_1028 (size=5153) 2024-12-07T12:21:23,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741852_1028 (size=5153) 2024-12-07T12:21:23,342 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/hbase/meta/1588230740/.tmp/ns/fb471cc8bbd042a4b013b61a23a827c3 2024-12-07T12:21:23,345 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/default/TestLogRolling-testSlowSyncLogRolling/297b751a5a49e3f4948d5969018b2211/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-12-07T12:21:23,348 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211. 2024-12-07T12:21:23,348 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 297b751a5a49e3f4948d5969018b2211: Waiting for close lock at 1733574083277Running coprocessor pre-close hooks at 1733574083278 (+1 ms)Disabling compacts and flushes for region at 1733574083278Disabling writes for close at 1733574083278Obtaining lock to block concurrent updates at 1733574083278Preparing flush snapshotting stores in 297b751a5a49e3f4948d5969018b2211 at 1733574083278Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1733574083279 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211. at 1733574083280 (+1 ms)Flushing 297b751a5a49e3f4948d5969018b2211/info: creating writer at 1733574083280Flushing 297b751a5a49e3f4948d5969018b2211/info: appending metadata at 1733574083284 (+4 ms)Flushing 297b751a5a49e3f4948d5969018b2211/info: closing flushed file at 1733574083284Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@435dd0ea: reopening flushed file at 1733574083300 (+16 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 297b751a5a49e3f4948d5969018b2211 in 32ms, sequenceid=48, compaction requested=true at 1733574083310 (+10 ms)Writing region close event to WAL at 1733574083339 (+29 ms)Running coprocessor post-close hooks at 1733574083346 (+7 ms)Closed at 1733574083348 (+2 ms) 2024-12-07T12:21:23,349 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1733573990662.297b751a5a49e3f4948d5969018b2211. 
2024-12-07T12:21:23,367 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/hbase/meta/1588230740/.tmp/table/f6be3d2e3eb246deaff22ee4f3d92f43 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1733573991134/Put/seqid=0 2024-12-07T12:21:23,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741853_1029 (size=5396) 2024-12-07T12:21:23,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741853_1029 (size=5396) 2024-12-07T12:21:23,374 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/hbase/meta/1588230740/.tmp/table/f6be3d2e3eb246deaff22ee4f3d92f43 2024-12-07T12:21:23,384 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/hbase/meta/1588230740/.tmp/info/643782639353478eb529496cab96452e as hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/hbase/meta/1588230740/info/643782639353478eb529496cab96452e 2024-12-07T12:21:23,391 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/hbase/meta/1588230740/info/643782639353478eb529496cab96452e, entries=10, sequenceid=11, filesize=6.9 K 2024-12-07T12:21:23,393 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/hbase/meta/1588230740/.tmp/ns/fb471cc8bbd042a4b013b61a23a827c3 as hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/hbase/meta/1588230740/ns/fb471cc8bbd042a4b013b61a23a827c3 2024-12-07T12:21:23,400 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/hbase/meta/1588230740/ns/fb471cc8bbd042a4b013b61a23a827c3, entries=2, sequenceid=11, filesize=5.0 K 2024-12-07T12:21:23,401 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/hbase/meta/1588230740/.tmp/table/f6be3d2e3eb246deaff22ee4f3d92f43 as hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/hbase/meta/1588230740/table/f6be3d2e3eb246deaff22ee4f3d92f43 2024-12-07T12:21:23,409 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/hbase/meta/1588230740/table/f6be3d2e3eb246deaff22ee4f3d92f43, entries=2, sequenceid=11, filesize=5.3 K 2024-12-07T12:21:23,411 INFO 
[RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 129ms, sequenceid=11, compaction requested=false 2024-12-07T12:21:23,417 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-07T12:21:23,417 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T12:21:23,418 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T12:21:23,418 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733574083281Running coprocessor pre-close hooks at 1733574083281Disabling compacts and flushes for region at 1733574083281Disabling writes for close at 1733574083281Obtaining lock to block concurrent updates at 1733574083281Preparing flush snapshotting stores in 1588230740 at 1733574083281Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1733574083281Flushing stores of hbase:meta,,1.1588230740 at 1733574083282 (+1 ms)Flushing 1588230740/info: creating writer at 1733574083283 (+1 ms)Flushing 1588230740/info: appending metadata at 1733574083302 (+19 ms)Flushing 1588230740/info: closing flushed file at 1733574083303 (+1 ms)Flushing 1588230740/ns: creating writer at 1733574083318 (+15 ms)Flushing 1588230740/ns: appending metadata at 1733574083334 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1733574083334Flushing 1588230740/table: creating writer at 1733574083350 (+16 ms)Flushing 1588230740/table: appending metadata at 1733574083366 (+16 ms)Flushing 1588230740/table: closing flushed file at 1733574083366Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@71f1ffcc: reopening flushed file at 1733574083382 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@e44a8f4: reopening flushed file at 1733574083392 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15988ae7: reopening flushed file at 1733574083401 (+9 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 129ms, sequenceid=11, compaction requested=false at 1733574083411 (+10 ms)Writing region close event to WAL at 1733574083412 (+1 ms)Running coprocessor post-close hooks at 1733574083417 (+5 ms)Closed at 1733574083418 (+1 ms) 2024-12-07T12:21:23,418 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T12:21:23,481 INFO [RS:0;27c6fcd7dac8:32959 {}] regionserver.HRegionServer(976): stopping server 27c6fcd7dac8,32959,1733573988556; all regions closed. 
2024-12-07T12:21:23,483 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:23,483 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:23,483 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:23,483 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:23,484 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:23,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741834_1010 (size=3066) 2024-12-07T12:21:23,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741834_1010 (size=3066) 2024-12-07T12:21:23,490 DEBUG [RS:0;27c6fcd7dac8:32959 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/oldWALs 2024-12-07T12:21:23,490 INFO [RS:0;27c6fcd7dac8:32959 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 27c6fcd7dac8%2C32959%2C1733573988556.meta:.meta(num 1733573990254) 2024-12-07T12:21:23,491 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:23,491 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:23,491 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:23,491 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:23,491 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:23,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741847_1023 (size=12695) 2024-12-07T12:21:23,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741847_1023 (size=12695) 2024-12-07T12:21:23,498 DEBUG [RS:0;27c6fcd7dac8:32959 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/oldWALs 2024-12-07T12:21:23,498 INFO [RS:0;27c6fcd7dac8:32959 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 27c6fcd7dac8%2C32959%2C1733573988556:(num 1733574063231) 2024-12-07T12:21:23,498 DEBUG [RS:0;27c6fcd7dac8:32959 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:21:23,498 INFO [RS:0;27c6fcd7dac8:32959 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T12:21:23,498 INFO [RS:0;27c6fcd7dac8:32959 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T12:21:23,499 INFO [RS:0;27c6fcd7dac8:32959 {}] hbase.ChoreService(370): Chore service for: regionserver/27c6fcd7dac8:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T12:21:23,499 INFO [RS:0;27c6fcd7dac8:32959 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T12:21:23,499 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T12:21:23,500 INFO [RS:0;27c6fcd7dac8:32959 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32959 2024-12-07T12:21:23,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32959-0x1018cddb4fe0001, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/27c6fcd7dac8,32959,1733573988556 2024-12-07T12:21:23,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T12:21:23,504 INFO [RS:0;27c6fcd7dac8:32959 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T12:21:23,506 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [27c6fcd7dac8,32959,1733573988556] 2024-12-07T12:21:23,508 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/27c6fcd7dac8,32959,1733573988556 already deleted, retry=false 2024-12-07T12:21:23,509 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 27c6fcd7dac8,32959,1733573988556 expired; onlineServers=0 2024-12-07T12:21:23,509 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '27c6fcd7dac8,40913,1733573987844' ***** 2024-12-07T12:21:23,509 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-07T12:21:23,509 INFO [M:0;27c6fcd7dac8:40913 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T12:21:23,509 INFO [M:0;27c6fcd7dac8:40913 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T12:21:23,509 DEBUG [M:0;27c6fcd7dac8:40913 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-07T12:21:23,509 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-07T12:21:23,509 DEBUG [M:0;27c6fcd7dac8:40913 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-07T12:21:23,509 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.large.0-1733573989526 {}] cleaner.HFileCleaner(306): Exit Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.large.0-1733573989526,5,FailOnTimeoutGroup] 2024-12-07T12:21:23,509 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.small.0-1733573989527 {}] cleaner.HFileCleaner(306): Exit Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.small.0-1733573989527,5,FailOnTimeoutGroup] 2024-12-07T12:21:23,510 INFO [M:0;27c6fcd7dac8:40913 {}] hbase.ChoreService(370): Chore service for: master/27c6fcd7dac8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-07T12:21:23,510 INFO [M:0;27c6fcd7dac8:40913 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T12:21:23,510 DEBUG [M:0;27c6fcd7dac8:40913 {}] master.HMaster(1795): Stopping service threads 2024-12-07T12:21:23,510 INFO [M:0;27c6fcd7dac8:40913 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-07T12:21:23,510 INFO [M:0;27c6fcd7dac8:40913 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T12:21:23,511 INFO [M:0;27c6fcd7dac8:40913 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-07T12:21:23,511 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-07T12:21:23,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T12:21:23,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:23,512 DEBUG [M:0;27c6fcd7dac8:40913 {}] zookeeper.ZKUtil(347): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T12:21:23,512 WARN [M:0;27c6fcd7dac8:40913 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T12:21:23,512 INFO [M:0;27c6fcd7dac8:40913 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/.lastflushedseqids 2024-12-07T12:21:23,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741854_1030 (size=130) 2024-12-07T12:21:23,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741854_1030 (size=130) 2024-12-07T12:21:23,526 INFO [M:0;27c6fcd7dac8:40913 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-07T12:21:23,526 INFO [M:0;27c6fcd7dac8:40913 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-07T12:21:23,526 DEBUG [M:0;27c6fcd7dac8:40913 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T12:21:23,526 INFO [M:0;27c6fcd7dac8:40913 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:21:23,526 DEBUG [M:0;27c6fcd7dac8:40913 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:21:23,527 DEBUG [M:0;27c6fcd7dac8:40913 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T12:21:23,527 DEBUG [M:0;27c6fcd7dac8:40913 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:21:23,527 INFO [M:0;27c6fcd7dac8:40913 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.04 KB heapSize=29.21 KB 2024-12-07T12:21:23,546 DEBUG [M:0;27c6fcd7dac8:40913 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d3a70c7539e94864adb77a330908e1a2 is 82, key is hbase:meta,,1/info:regioninfo/1733573990336/Put/seqid=0 2024-12-07T12:21:23,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741855_1031 (size=5672) 2024-12-07T12:21:23,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741855_1031 (size=5672) 2024-12-07T12:21:23,553 INFO [M:0;27c6fcd7dac8:40913 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d3a70c7539e94864adb77a330908e1a2 2024-12-07T12:21:23,584 DEBUG [M:0;27c6fcd7dac8:40913 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/84b35e0038f34b7d81d9f0d851a43716 is 767, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733573991143/Put/seqid=0 2024-12-07T12:21:23,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741856_1032 (size=6248) 2024-12-07T12:21:23,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741856_1032 (size=6248) 2024-12-07T12:21:23,592 INFO [M:0;27c6fcd7dac8:40913 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.43 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/84b35e0038f34b7d81d9f0d851a43716 2024-12-07T12:21:23,600 INFO [M:0;27c6fcd7dac8:40913 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 84b35e0038f34b7d81d9f0d851a43716 2024-12-07T12:21:23,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32959-0x1018cddb4fe0001, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:21:23,606 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32959-0x1018cddb4fe0001, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:21:23,607 INFO [RS:0;27c6fcd7dac8:32959 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T12:21:23,607 INFO [RS:0;27c6fcd7dac8:32959 {}] regionserver.HRegionServer(1031): Exiting; stopping=27c6fcd7dac8,32959,1733573988556; zookeeper connection closed. 2024-12-07T12:21:23,607 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@702438a7 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@702438a7 2024-12-07T12:21:23,608 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-07T12:21:23,623 DEBUG [M:0;27c6fcd7dac8:40913 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5f32de7d55e242c3a24a1490502f7632 is 69, key is 27c6fcd7dac8,32959,1733573988556/rs:state/1733573989607/Put/seqid=0 2024-12-07T12:21:23,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741857_1033 (size=5156) 2024-12-07T12:21:23,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741857_1033 (size=5156) 2024-12-07T12:21:23,630 INFO [M:0;27c6fcd7dac8:40913 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5f32de7d55e242c3a24a1490502f7632 2024-12-07T12:21:23,658 DEBUG [M:0;27c6fcd7dac8:40913 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7260944250e64537b1cabea0a94d5a81 is 52, key is load_balancer_on/state:d/1733573990640/Put/seqid=0 2024-12-07T12:21:23,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741858_1034 (size=5056) 2024-12-07T12:21:23,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741858_1034 (size=5056) 2024-12-07T12:21:23,665 INFO [M:0;27c6fcd7dac8:40913 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7260944250e64537b1cabea0a94d5a81 2024-12-07T12:21:23,673 DEBUG [M:0;27c6fcd7dac8:40913 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d3a70c7539e94864adb77a330908e1a2 as hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d3a70c7539e94864adb77a330908e1a2 2024-12-07T12:21:23,679 INFO [M:0;27c6fcd7dac8:40913 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d3a70c7539e94864adb77a330908e1a2, entries=8, sequenceid=59, filesize=5.5 K 2024-12-07T12:21:23,680 DEBUG [M:0;27c6fcd7dac8:40913 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/84b35e0038f34b7d81d9f0d851a43716 as hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/84b35e0038f34b7d81d9f0d851a43716 2024-12-07T12:21:23,686 INFO [M:0;27c6fcd7dac8:40913 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 84b35e0038f34b7d81d9f0d851a43716 2024-12-07T12:21:23,686 INFO [M:0;27c6fcd7dac8:40913 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/84b35e0038f34b7d81d9f0d851a43716, entries=6, sequenceid=59, filesize=6.1 K 2024-12-07T12:21:23,687 DEBUG [M:0;27c6fcd7dac8:40913 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5f32de7d55e242c3a24a1490502f7632 as hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5f32de7d55e242c3a24a1490502f7632 2024-12-07T12:21:23,691 INFO [regionserver/27c6fcd7dac8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T12:21:23,692 INFO [M:0;27c6fcd7dac8:40913 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5f32de7d55e242c3a24a1490502f7632, entries=1, sequenceid=59, filesize=5.0 K 2024-12-07T12:21:23,693 DEBUG [M:0;27c6fcd7dac8:40913 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7260944250e64537b1cabea0a94d5a81 as hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7260944250e64537b1cabea0a94d5a81 2024-12-07T12:21:23,699 INFO [M:0;27c6fcd7dac8:40913 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7260944250e64537b1cabea0a94d5a81, entries=1, sequenceid=59, filesize=4.9 K 2024-12-07T12:21:23,700 INFO [M:0;27c6fcd7dac8:40913 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 173ms, sequenceid=59, compaction requested=false 2024-12-07T12:21:23,701 INFO [M:0;27c6fcd7dac8:40913 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-07T12:21:23,702 DEBUG [M:0;27c6fcd7dac8:40913 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733574083526Disabling compacts and flushes for region at 1733574083526Disabling writes for close at 1733574083527 (+1 ms)Obtaining lock to block concurrent updates at 1733574083527Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733574083527Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23588, getHeapSize=29848, getOffHeapSize=0, getCellsCount=70 at 1733574083528 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733574083529 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733574083529Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733574083545 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733574083545Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733574083560 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733574083583 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733574083583Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733574083600 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733574083622 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733574083622Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733574083638 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733574083657 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733574083657Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@46bcb42f: reopening flushed file at 1733574083672 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4161cbc4: reopening flushed file at 1733574083679 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4e1fd1a1: reopening flushed file at 1733574083686 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@756cb8c1: reopening flushed file at 1733574083692 (+6 ms)Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 173ms, sequenceid=59, compaction requested=false at 1733574083700 (+8 ms)Writing region close event to WAL at 1733574083701 (+1 ms)Closed at 1733574083701 2024-12-07T12:21:23,702 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:23,703 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:23,703 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:23,703 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:23,703 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:23,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37439 is added to blk_1073741830_1006 (size=27985) 2024-12-07T12:21:23,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741830_1006 (size=27985) 2024-12-07T12:21:23,706 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T12:21:23,706 INFO [M:0;27c6fcd7dac8:40913 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-07T12:21:23,706 INFO [M:0;27c6fcd7dac8:40913 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40913 2024-12-07T12:21:23,707 INFO [M:0;27c6fcd7dac8:40913 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T12:21:23,809 INFO [M:0;27c6fcd7dac8:40913 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T12:21:23,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:21:23,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x1018cddb4fe0000, quorum=127.0.0.1:60900, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:21:23,813 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:21:23,815 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:21:23,816 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:21:23,816 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:21:23,816 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/hadoop.log.dir/,STOPPED} 2024-12-07T12:21:23,819 WARN [BP-1537761316-172.17.0.2-1733573984532 heartbeating to localhost/127.0.0.1:35545 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T12:21:23,819 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T12:21:23,819 WARN [BP-1537761316-172.17.0.2-1733573984532 heartbeating to localhost/127.0.0.1:35545 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1537761316-172.17.0.2-1733573984532 (Datanode Uuid 96a83e5d-1cfc-4e5c-8b35-1c75888d684a) service to localhost/127.0.0.1:35545 2024-12-07T12:21:23,819 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T12:21:23,820 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/cluster_7ab8ef0f-a838-5302-ca31-79cab7330b02/data/data3/current/BP-1537761316-172.17.0.2-1733573984532 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:21:23,820 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/cluster_7ab8ef0f-a838-5302-ca31-79cab7330b02/data/data4/current/BP-1537761316-172.17.0.2-1733573984532 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:21:23,821 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T12:21:23,822 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:21:23,823 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:21:23,823 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:21:23,823 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:21:23,823 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/hadoop.log.dir/,STOPPED} 2024-12-07T12:21:23,825 WARN [BP-1537761316-172.17.0.2-1733573984532 heartbeating to localhost/127.0.0.1:35545 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T12:21:23,825 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T12:21:23,825 WARN [BP-1537761316-172.17.0.2-1733573984532 heartbeating to localhost/127.0.0.1:35545 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1537761316-172.17.0.2-1733573984532 (Datanode Uuid b19de99b-07ed-4ea1-8bcd-830e280a3f10) service to localhost/127.0.0.1:35545 2024-12-07T12:21:23,825 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T12:21:23,825 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/cluster_7ab8ef0f-a838-5302-ca31-79cab7330b02/data/data1/current/BP-1537761316-172.17.0.2-1733573984532 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:21:23,826 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/cluster_7ab8ef0f-a838-5302-ca31-79cab7330b02/data/data2/current/BP-1537761316-172.17.0.2-1733573984532 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:21:23,826 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T12:21:23,835 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T12:21:23,836 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:21:23,836 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:21:23,836 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:21:23,837 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/hadoop.log.dir/,STOPPED} 2024-12-07T12:21:23,845 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-07T12:21:23,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-07T12:21:23,884 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=79 (was 12) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35545 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: LeaseRenewer:jenkins@localhost:35545 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35545 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: master/27c6fcd7dac8:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/27c6fcd7dac8:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35545 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35545 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35545 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: regionserver/27c6fcd7dac8:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:35545 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@7b8013cb java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35545 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=129 (was 287), ProcessCount=11 (was 11), AvailableMemoryMB=6311 (was 6987) 2024-12-07T12:21:23,890 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=80, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=129, ProcessCount=11, AvailableMemoryMB=6311 2024-12-07T12:21:23,890 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-07T12:21:23,891 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/hadoop.log.dir so I do NOT create it in target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4 2024-12-07T12:21:23,891 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b44dcc53-3031-a1bc-f444-81b451929877/hadoop.tmp.dir so I do NOT create it in target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4 2024-12-07T12:21:23,891 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/cluster_62c3eae8-243c-2e68-7096-7d71be6e8b40, deleteOnExit=true 2024-12-07T12:21:23,891 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-07T12:21:23,891 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/test.cache.data in system properties and HBase conf 2024-12-07T12:21:23,891 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T12:21:23,891 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/hadoop.log.dir in system properties and HBase conf 2024-12-07T12:21:23,891 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T12:21:23,891 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T12:21:23,891 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-07T12:21:23,892 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-07T12:21:23,892 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T12:21:23,892 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T12:21:23,892 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T12:21:23,892 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T12:21:23,892 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T12:21:23,892 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T12:21:23,892 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T12:21:23,892 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T12:21:23,892 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T12:21:23,892 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/nfs.dump.dir in system properties and HBase conf 2024-12-07T12:21:23,892 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/java.io.tmpdir in system properties and HBase conf 2024-12-07T12:21:23,893 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T12:21:23,893 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T12:21:23,893 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T12:21:23,906 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-07T12:21:23,987 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:21:23,993 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:21:23,994 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:21:23,994 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:21:23,994 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T12:21:23,994 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:21:23,995 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54b8bf96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:21:23,996 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@737d6115{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:21:24,113 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@55cb1221{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/java.io.tmpdir/jetty-localhost-45055-hadoop-hdfs-3_4_1-tests_jar-_-any-1965283960662057149/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T12:21:24,114 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@542ee468{HTTP/1.1, (http/1.1)}{localhost:45055} 2024-12-07T12:21:24,114 INFO [Time-limited test {}] server.Server(415): Started @101532ms 2024-12-07T12:21:24,128 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-07T12:21:24,200 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:21:24,204 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:21:24,205 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:21:24,205 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:21:24,205 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T12:21:24,206 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@779c0b56{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:21:24,206 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61783b0f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:21:24,321 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@36505daf{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/java.io.tmpdir/jetty-localhost-43995-hadoop-hdfs-3_4_1-tests_jar-_-any-631178699675629526/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:21:24,321 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@21e00560{HTTP/1.1, (http/1.1)}{localhost:43995} 2024-12-07T12:21:24,321 INFO [Time-limited test {}] server.Server(415): Started @101739ms 2024-12-07T12:21:24,323 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T12:21:24,363 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:21:24,367 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:21:24,367 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:21:24,368 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:21:24,368 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T12:21:24,368 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1410bc86{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:21:24,369 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6082dc4f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:21:24,454 WARN [Thread-440 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/cluster_62c3eae8-243c-2e68-7096-7d71be6e8b40/data/data2/current/BP-1911068295-172.17.0.2-1733574083925/current, will proceed with Du for space computation calculation, 2024-12-07T12:21:24,454 WARN [Thread-439 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/cluster_62c3eae8-243c-2e68-7096-7d71be6e8b40/data/data1/current/BP-1911068295-172.17.0.2-1733574083925/current, will proceed with Du for space computation calculation, 2024-12-07T12:21:24,473 WARN [Thread-418 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T12:21:24,476 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x19ad86ee58a5605d with lease ID 0xb24d439b36993784: Processing first storage report for DS-d1bbd60c-cf8c-4e46-a42f-e09f81e4396b from datanode DatanodeRegistration(127.0.0.1:41121, datanodeUuid=6cd48622-6547-47b4-9e9e-bdb487a10b31, infoPort=38907, infoSecurePort=0, ipcPort=34361, storageInfo=lv=-57;cid=testClusterID;nsid=42753866;c=1733574083925) 2024-12-07T12:21:24,476 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x19ad86ee58a5605d with lease ID 0xb24d439b36993784: from storage DS-d1bbd60c-cf8c-4e46-a42f-e09f81e4396b node DatanodeRegistration(127.0.0.1:41121, datanodeUuid=6cd48622-6547-47b4-9e9e-bdb487a10b31, infoPort=38907, infoSecurePort=0, ipcPort=34361, storageInfo=lv=-57;cid=testClusterID;nsid=42753866;c=1733574083925), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:21:24,476 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x19ad86ee58a5605d with lease ID 0xb24d439b36993784: Processing first storage report for DS-efa35edd-dd6e-426e-a5de-bb140515e2dd from datanode DatanodeRegistration(127.0.0.1:41121, datanodeUuid=6cd48622-6547-47b4-9e9e-bdb487a10b31, infoPort=38907, infoSecurePort=0, ipcPort=34361, storageInfo=lv=-57;cid=testClusterID;nsid=42753866;c=1733574083925) 2024-12-07T12:21:24,476 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x19ad86ee58a5605d with lease ID 0xb24d439b36993784: from storage DS-efa35edd-dd6e-426e-a5de-bb140515e2dd node DatanodeRegistration(127.0.0.1:41121, datanodeUuid=6cd48622-6547-47b4-9e9e-bdb487a10b31, infoPort=38907, infoSecurePort=0, ipcPort=34361, storageInfo=lv=-57;cid=testClusterID;nsid=42753866;c=1733574083925), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:21:24,502 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@af33574{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/java.io.tmpdir/jetty-localhost-46599-hadoop-hdfs-3_4_1-tests_jar-_-any-9397118539260154484/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:21:24,503 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@17d00685{HTTP/1.1, (http/1.1)}{localhost:46599} 2024-12-07T12:21:24,503 INFO [Time-limited test {}] server.Server(415): Started @101920ms 2024-12-07T12:21:24,504 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
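Editor's note: the two warnings above are harmless for a throwaway test cluster, but both point at tunables. The AuthenticationFilter falls back to a random per-process secret because the file at /home/jenkins/hadoop-http-auth-signature-secret is missing; in current Hadoop releases that path comes from hadoop.http.authentication.signature.secret.file (property name assumed here, the log only shows the resolved path). The DirectoryScanner warning means dfs.datanode.directoryscan.throttle.limit.ms.per.sec was set above 1000 ms/sec, so the throttle is reset to -1 (disabled). A minimal sketch of how a test harness could pre-create the secret and pick a valid throttle before booting the mini cluster, assuming only the standard Hadoop Configuration API:

    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import org.apache.hadoop.conf.Configuration;

    public class MiniClusterHttpAuthSetup {
      public static Configuration prepare(Path secretFile) throws Exception {
        // Write a signature secret so AuthenticationFilter does not fall back to random secrets.
        Files.write(secretFile, "test-only-secret".getBytes(StandardCharsets.UTF_8));

        Configuration conf = new Configuration();
        // Property name assumed from Hadoop's HTTP auth documentation; the log prints only the path.
        conf.set("hadoop.http.authentication.signature.secret.file", secretFile.toString());
        // Keep the directory scanner throttle at or below 1000 ms/sec so it is not reset to -1.
        conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 1000);
        return conf;
      }
    }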
2024-12-07T12:21:24,603 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/cluster_62c3eae8-243c-2e68-7096-7d71be6e8b40/data/data3/current/BP-1911068295-172.17.0.2-1733574083925/current, will proceed with Du for space computation calculation, 2024-12-07T12:21:24,604 WARN [Thread-466 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/cluster_62c3eae8-243c-2e68-7096-7d71be6e8b40/data/data4/current/BP-1911068295-172.17.0.2-1733574083925/current, will proceed with Du for space computation calculation, 2024-12-07T12:21:24,624 WARN [Thread-454 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T12:21:24,627 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x905034b8c8f6ebd4 with lease ID 0xb24d439b36993785: Processing first storage report for DS-b3d74116-7402-40d1-ac26-568d7ac56679 from datanode DatanodeRegistration(127.0.0.1:36455, datanodeUuid=a22ce972-bbb9-4038-99a5-7de959bc5a3b, infoPort=36735, infoSecurePort=0, ipcPort=40143, storageInfo=lv=-57;cid=testClusterID;nsid=42753866;c=1733574083925) 2024-12-07T12:21:24,627 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x905034b8c8f6ebd4 with lease ID 0xb24d439b36993785: from storage DS-b3d74116-7402-40d1-ac26-568d7ac56679 node DatanodeRegistration(127.0.0.1:36455, datanodeUuid=a22ce972-bbb9-4038-99a5-7de959bc5a3b, infoPort=36735, infoSecurePort=0, ipcPort=40143, storageInfo=lv=-57;cid=testClusterID;nsid=42753866;c=1733574083925), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T12:21:24,627 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x905034b8c8f6ebd4 with lease ID 0xb24d439b36993785: Processing first storage report for DS-8851c9dd-b7aa-45b9-b27d-7d31decee584 from datanode DatanodeRegistration(127.0.0.1:36455, datanodeUuid=a22ce972-bbb9-4038-99a5-7de959bc5a3b, infoPort=36735, infoSecurePort=0, ipcPort=40143, storageInfo=lv=-57;cid=testClusterID;nsid=42753866;c=1733574083925) 2024-12-07T12:21:24,627 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x905034b8c8f6ebd4 with lease ID 0xb24d439b36993785: from storage DS-8851c9dd-b7aa-45b9-b27d-7d31decee584 node DatanodeRegistration(127.0.0.1:36455, datanodeUuid=a22ce972-bbb9-4038-99a5-7de959bc5a3b, infoPort=36735, infoSecurePort=0, ipcPort=40143, storageInfo=lv=-57;cid=testClusterID;nsid=42753866;c=1733574083925), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:21:24,633 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4 2024-12-07T12:21:24,635 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/cluster_62c3eae8-243c-2e68-7096-7d71be6e8b40/zookeeper_0, clientPort=54849, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/cluster_62c3eae8-243c-2e68-7096-7d71be6e8b40/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/cluster_62c3eae8-243c-2e68-7096-7d71be6e8b40/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-07T12:21:24,636 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54849 2024-12-07T12:21:24,637 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:21:24,638 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:21:24,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741825_1001 (size=7) 2024-12-07T12:21:24,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36455 is added to blk_1073741825_1001 (size=7) 2024-12-07T12:21:24,651 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68 with version=8 2024-12-07T12:21:24,651 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/hbase-staging 2024-12-07T12:21:24,653 INFO [Time-limited test {}] client.ConnectionUtils(128): master/27c6fcd7dac8:0 server-side Connection retries=45 2024-12-07T12:21:24,653 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:21:24,653 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T12:21:24,654 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T12:21:24,654 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:21:24,654 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T12:21:24,654 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-07T12:21:24,654 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T12:21:24,655 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40779 2024-12-07T12:21:24,656 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40779 connecting to ZooKeeper ensemble=127.0.0.1:54849 2024-12-07T12:21:24,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:407790x0, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T12:21:24,662 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40779-0x1018cdf32aa0000 connected 2024-12-07T12:21:24,678 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:21:24,680 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:21:24,683 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:21:24,683 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68, hbase.cluster.distributed=false 2024-12-07T12:21:24,684 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T12:21:24,685 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40779 2024-12-07T12:21:24,685 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40779 2024-12-07T12:21:24,686 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40779 2024-12-07T12:21:24,686 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40779 2024-12-07T12:21:24,686 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40779 2024-12-07T12:21:24,703 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/27c6fcd7dac8:0 server-side Connection retries=45 2024-12-07T12:21:24,703 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:21:24,703 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T12:21:24,703 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T12:21:24,703 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:21:24,703 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T12:21:24,704 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T12:21:24,704 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T12:21:24,704 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42095 2024-12-07T12:21:24,706 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42095 connecting to ZooKeeper ensemble=127.0.0.1:54849 2024-12-07T12:21:24,706 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:21:24,709 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:21:24,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:420950x0, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T12:21:24,714 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:420950x0, quorum=127.0.0.1:54849, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:21:24,714 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42095-0x1018cdf32aa0001 connected 2024-12-07T12:21:24,714 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T12:21:24,715 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T12:21:24,716 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42095-0x1018cdf32aa0001, quorum=127.0.0.1:54849, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T12:21:24,717 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42095-0x1018cdf32aa0001, quorum=127.0.0.1:54849, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T12:21:24,717 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42095 2024-12-07T12:21:24,718 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42095 2024-12-07T12:21:24,719 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42095 2024-12-07T12:21:24,720 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42095 2024-12-07T12:21:24,720 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42095 2024-12-07T12:21:24,734 
DEBUG [M:0;27c6fcd7dac8:40779 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;27c6fcd7dac8:40779 2024-12-07T12:21:24,734 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/27c6fcd7dac8,40779,1733574084653 2024-12-07T12:21:24,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:21:24,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42095-0x1018cdf32aa0001, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:21:24,737 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/27c6fcd7dac8,40779,1733574084653 2024-12-07T12:21:24,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42095-0x1018cdf32aa0001, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T12:21:24,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:24,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42095-0x1018cdf32aa0001, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:24,739 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T12:21:24,739 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/27c6fcd7dac8,40779,1733574084653 from backup master directory 2024-12-07T12:21:24,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/27c6fcd7dac8,40779,1733574084653 2024-12-07T12:21:24,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42095-0x1018cdf32aa0001, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:21:24,742 WARN [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
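Editor's note: the repeated ZKUtil "Set watcher on znode that does not yet exist" lines above use ZooKeeper's ordinary exists-watch: register a watch on a path that is not there yet and get a NodeCreated event once /hbase/master or /hbase/running appears. A minimal sketch of that pattern with the plain Apache ZooKeeper client, independent of HBase's ZKUtil wrapper (connection string and path are placeholders taken from this run):

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ExistsWatchExample {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:54849", 30000, event -> { });
        Watcher onCreate = (WatchedEvent event) -> {
          if (event.getType() == Watcher.Event.EventType.NodeCreated) {
            System.out.println("znode appeared: " + event.getPath());
          }
        };
        // exists() returns null when the node is absent but still registers the watch,
        // so a later create of /hbase/running fires NodeCreated exactly once on this session.
        if (zk.exists("/hbase/running", onCreate) == null) {
          System.out.println("not there yet, watch registered");
        }
        Thread.sleep(60_000); // keep the session alive long enough to observe the event
        zk.close();
      }
    }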
2024-12-07T12:21:24,742 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=27c6fcd7dac8,40779,1733574084653 2024-12-07T12:21:24,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:21:24,748 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/hbase.id] with ID: dade928d-b7af-480d-9d71-91a43fb1e49f 2024-12-07T12:21:24,748 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/.tmp/hbase.id 2024-12-07T12:21:24,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36455 is added to blk_1073741826_1002 (size=42) 2024-12-07T12:21:24,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741826_1002 (size=42) 2024-12-07T12:21:24,757 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/.tmp/hbase.id]:[hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/hbase.id] 2024-12-07T12:21:24,773 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:21:24,774 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-07T12:21:24,776 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
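Editor's note: the cluster ID sequence above (write hbase.id under .tmp, then move it to its target) is the usual write-then-rename trick for making a small file appear atomically on HDFS: readers see either the complete file or nothing. A sketch of the same pattern against the public Hadoop FileSystem API, with placeholder paths rather than the exact test directories from this run:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.util.UUID;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WriteThenRename {
      public static void writeAtomically(FileSystem fs, Path target, String contents) throws IOException {
        Path tmp = new Path(target.getParent(), ".tmp-" + target.getName());
        // Write the full contents to a temporary sibling first...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(contents.getBytes(StandardCharsets.UTF_8));
        }
        // ...then rename it into place; the rename either succeeds as a whole or not at all.
        if (!fs.rename(tmp, target)) {
          throw new IOException("rename failed: " + tmp + " -> " + target);
        }
      }

      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Any UUID works; HBase stores the generated cluster ID in hbase.id this way.
        writeAtomically(fs, new Path("/user/jenkins/demo/hbase.id"), UUID.randomUUID().toString());
      }
    }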
2024-12-07T12:21:24,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42095-0x1018cdf32aa0001, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:24,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:24,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741827_1003 (size=196) 2024-12-07T12:21:24,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36455 is added to blk_1073741827_1003 (size=196) 2024-12-07T12:21:24,799 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T12:21:24,800 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T12:21:24,801 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T12:21:24,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741828_1004 (size=1189) 2024-12-07T12:21:24,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36455 is added to blk_1073741828_1004 (size=1189) 2024-12-07T12:21:24,818 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/data/master/store 2024-12-07T12:21:24,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741829_1005 (size=34) 2024-12-07T12:21:24,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36455 is added to blk_1073741829_1005 (size=34) 2024-12-07T12:21:24,840 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:21:24,840 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T12:21:24,840 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:21:24,840 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:21:24,840 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T12:21:24,840 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:21:24,840 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
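Editor's note: the master:store schema dumped above (families info, proc, rs, state, each with VERSIONS, BLOOMFILTER, IN_MEMORY, DATA_BLOCK_ENCODING and BLOCKSIZE) is built internally by MasterRegion, but the same per-family attributes are what you would set through the public descriptor builders for an ordinary table. A sketch reproducing the 'info' family settings shown above; the table name and namespace are placeholders, not the real master:store table:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorExample {
      public static TableDescriptor build() {
        // Mirrors the 'info' family from the dump: 3 versions, ROWCOL bloom filter,
        // in-memory, ROW_INDEX_V1 block encoding, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .build();
        // "demo:store_like" is a made-up name for illustration only.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "store_like"))
            .setColumnFamily(info)
            .build();
      }
    }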
2024-12-07T12:21:24,841 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733574084840Disabling compacts and flushes for region at 1733574084840Disabling writes for close at 1733574084840Writing region close event to WAL at 1733574084840Closed at 1733574084840 2024-12-07T12:21:24,842 WARN [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/data/master/store/.initializing 2024-12-07T12:21:24,842 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/WALs/27c6fcd7dac8,40779,1733574084653 2024-12-07T12:21:24,846 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=27c6fcd7dac8%2C40779%2C1733574084653, suffix=, logDir=hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/WALs/27c6fcd7dac8,40779,1733574084653, archiveDir=hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/oldWALs, maxLogs=10 2024-12-07T12:21:24,847 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C40779%2C1733574084653.1733574084846 2024-12-07T12:21:24,854 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/WALs/27c6fcd7dac8,40779,1733574084653/27c6fcd7dac8%2C40779%2C1733574084653.1733574084846 2024-12-07T12:21:24,859 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36735:36735),(127.0.0.1/127.0.0.1:38907:38907)] 2024-12-07T12:21:24,862 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:21:24,862 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:21:24,862 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:21:24,862 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:21:24,864 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:21:24,866 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T12:21:24,866 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:24,867 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:21:24,867 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:21:24,869 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T12:21:24,869 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:24,869 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:21:24,869 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:21:24,872 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T12:21:24,872 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:24,873 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:21:24,873 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:21:24,875 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T12:21:24,875 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:24,875 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:21:24,876 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:21:24,877 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:21:24,877 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:21:24,879 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:21:24,879 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:21:24,880 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T12:21:24,882 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:21:24,889 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:21:24,890 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=816414, jitterRate=0.03812488913536072}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T12:21:24,891 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733574084862Initializing all the Stores at 1733574084864 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574084864Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574084864Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574084864Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574084864Cleaning up temporary data from old regions at 1733574084879 (+15 ms)Region opened successfully at 1733574084891 (+12 ms) 2024-12-07T12:21:24,891 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-07T12:21:24,898 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77455df1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=27c6fcd7dac8/172.17.0.2:0 2024-12-07T12:21:24,899 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-07T12:21:24,900 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T12:21:24,900 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T12:21:24,900 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T12:21:24,901 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-07T12:21:24,901 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-07T12:21:24,901 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T12:21:24,904 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-07T12:21:24,905 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T12:21:24,907 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-07T12:21:24,907 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T12:21:24,908 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T12:21:24,909 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-07T12:21:24,910 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T12:21:24,911 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T12:21:24,914 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-07T12:21:24,915 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T12:21:24,917 DEBUG 
[master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T12:21:24,920 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T12:21:24,921 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T12:21:24,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T12:21:24,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42095-0x1018cdf32aa0001, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T12:21:24,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:24,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42095-0x1018cdf32aa0001, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:24,923 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=27c6fcd7dac8,40779,1733574084653, sessionid=0x1018cdf32aa0000, setting cluster-up flag (Was=false) 2024-12-07T12:21:24,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:24,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42095-0x1018cdf32aa0001, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:24,933 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T12:21:24,934 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=27c6fcd7dac8,40779,1733574084653 2024-12-07T12:21:24,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42095-0x1018cdf32aa0001, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:24,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:24,943 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T12:21:24,945 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=27c6fcd7dac8,40779,1733574084653 2024-12-07T12:21:24,946 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-07T12:21:24,949 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-07T12:21:24,949 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-07T12:21:24,950 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-07T12:21:24,950 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 27c6fcd7dac8,40779,1733574084653 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T12:21:24,951 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:21:24,952 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:21:24,952 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:21:24,952 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:21:24,952 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/27c6fcd7dac8:0, corePoolSize=10, maxPoolSize=10 2024-12-07T12:21:24,952 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:24,952 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=2, maxPoolSize=2 2024-12-07T12:21:24,952 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=1, 
maxPoolSize=1 2024-12-07T12:21:24,953 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733574114953 2024-12-07T12:21:24,953 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-07T12:21:24,953 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T12:21:24,954 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T12:21:24,954 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T12:21:24,954 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T12:21:24,954 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T12:21:24,954 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T12:21:24,954 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:24,954 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-07T12:21:24,955 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T12:21:24,955 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T12:21:24,955 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T12:21:24,955 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T12:21:24,955 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T12:21:24,956 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.large.0-1733574084956,5,FailOnTimeoutGroup] 2024-12-07T12:21:24,956 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:24,956 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.small.0-1733574084956,5,FailOnTimeoutGroup] 2024-12-07T12:21:24,956 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
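Editor's note: the LogsCleaner and HFileCleaner chores initialized above run on the 600000 ms period printed and delegate to the TimeToLive* cleaner plugins listed. For a real deployment the chore period and the TTLs are plain hbase-site settings; a hedged sketch, assuming the usual property names (hbase.master.cleaner.interval, hbase.master.logcleaner.ttl, hbase.master.hfilecleaner.ttl), which this log does not itself print:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerTuning {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Run the log/hfile cleaner chores every 5 minutes instead of the default 10 (assumed key).
        conf.setLong("hbase.master.cleaner.interval", 5 * 60 * 1000L);
        // Keep WALs in oldWALs for 30 minutes before TimeToLiveLogCleaner may delete them (assumed key).
        conf.setLong("hbase.master.logcleaner.ttl", 30 * 60 * 1000L);
        // Keep archived HFiles for 15 minutes before TimeToLiveHFileCleaner may delete them (assumed key).
        conf.setLong("hbase.master.hfilecleaner.ttl", 15 * 60 * 1000L);
        return conf;
      }
    }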
2024-12-07T12:21:24,956 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-07T12:21:24,956 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:24,956 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:24,956 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T12:21:24,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36455 is added to blk_1073741831_1007 (size=1321) 2024-12-07T12:21:24,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741831_1007 (size=1321) 2024-12-07T12:21:24,966 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-07T12:21:24,966 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68 2024-12-07T12:21:24,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36455 is added to blk_1073741832_1008 (size=32) 2024-12-07T12:21:24,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741832_1008 (size=32) 2024-12-07T12:21:24,977 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:21:24,978 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T12:21:24,980 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T12:21:24,980 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:24,980 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:21:24,981 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 
1588230740 2024-12-07T12:21:24,982 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T12:21:24,983 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:24,983 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:21:24,983 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T12:21:24,985 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T12:21:24,985 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:24,986 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:21:24,986 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T12:21:24,988 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T12:21:24,988 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:24,988 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:21:24,988 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T12:21:24,989 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/data/hbase/meta/1588230740 2024-12-07T12:21:24,989 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/data/hbase/meta/1588230740 2024-12-07T12:21:24,991 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T12:21:24,992 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T12:21:24,992 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-07T12:21:24,994 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T12:21:24,997 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:21:24,997 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=703417, jitterRate=-0.10555946826934814}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T12:21:25,000 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733574084977Initializing all the Stores at 1733574084978 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574084978Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574084978Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574084978Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574084978Cleaning up temporary data from old regions at 1733574084992 (+14 ms)Region opened successfully at 1733574084999 (+7 ms) 2024-12-07T12:21:25,000 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T12:21:25,000 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T12:21:25,000 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T12:21:25,000 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T12:21:25,000 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T12:21:25,000 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T12:21:25,001 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733574085000Disabling compacts and flushes for region at 1733574085000Disabling writes for close at 1733574085000Writing region 
close event to WAL at 1733574085000Closed at 1733574085000 2024-12-07T12:21:25,003 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T12:21:25,003 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-07T12:21:25,003 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T12:21:25,005 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T12:21:25,007 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T12:21:25,022 INFO [RS:0;27c6fcd7dac8:42095 {}] regionserver.HRegionServer(746): ClusterId : dade928d-b7af-480d-9d71-91a43fb1e49f 2024-12-07T12:21:25,023 DEBUG [RS:0;27c6fcd7dac8:42095 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T12:21:25,025 DEBUG [RS:0;27c6fcd7dac8:42095 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T12:21:25,025 DEBUG [RS:0;27c6fcd7dac8:42095 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T12:21:25,027 DEBUG [RS:0;27c6fcd7dac8:42095 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T12:21:25,027 DEBUG [RS:0;27c6fcd7dac8:42095 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cdc4eea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=27c6fcd7dac8/172.17.0.2:0 2024-12-07T12:21:25,045 DEBUG [RS:0;27c6fcd7dac8:42095 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;27c6fcd7dac8:42095 2024-12-07T12:21:25,046 INFO [RS:0;27c6fcd7dac8:42095 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T12:21:25,046 INFO [RS:0;27c6fcd7dac8:42095 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T12:21:25,046 DEBUG [RS:0;27c6fcd7dac8:42095 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-07T12:21:25,049 INFO [RS:0;27c6fcd7dac8:42095 {}] regionserver.HRegionServer(2659): reportForDuty to master=27c6fcd7dac8,40779,1733574084653 with port=42095, startcode=1733574084703 2024-12-07T12:21:25,049 DEBUG [RS:0;27c6fcd7dac8:42095 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T12:21:25,053 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58283, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T12:21:25,054 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40779 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 27c6fcd7dac8,42095,1733574084703 2024-12-07T12:21:25,054 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40779 {}] master.ServerManager(517): Registering regionserver=27c6fcd7dac8,42095,1733574084703 2024-12-07T12:21:25,058 DEBUG [RS:0;27c6fcd7dac8:42095 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68 2024-12-07T12:21:25,058 DEBUG [RS:0;27c6fcd7dac8:42095 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36683 2024-12-07T12:21:25,058 DEBUG [RS:0;27c6fcd7dac8:42095 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T12:21:25,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T12:21:25,062 DEBUG [RS:0;27c6fcd7dac8:42095 {}] zookeeper.ZKUtil(111): regionserver:42095-0x1018cdf32aa0001, quorum=127.0.0.1:54849, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/27c6fcd7dac8,42095,1733574084703 2024-12-07T12:21:25,062 WARN [RS:0;27c6fcd7dac8:42095 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T12:21:25,062 INFO [RS:0;27c6fcd7dac8:42095 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T12:21:25,062 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [27c6fcd7dac8,42095,1733574084703] 2024-12-07T12:21:25,063 DEBUG [RS:0;27c6fcd7dac8:42095 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/WALs/27c6fcd7dac8,42095,1733574084703 2024-12-07T12:21:25,069 INFO [RS:0;27c6fcd7dac8:42095 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T12:21:25,073 INFO [RS:0;27c6fcd7dac8:42095 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T12:21:25,074 INFO [RS:0;27c6fcd7dac8:42095 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T12:21:25,074 INFO [RS:0;27c6fcd7dac8:42095 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-07T12:21:25,076 INFO [RS:0;27c6fcd7dac8:42095 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T12:21:25,077 INFO [RS:0;27c6fcd7dac8:42095 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T12:21:25,078 INFO [RS:0;27c6fcd7dac8:42095 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:25,078 DEBUG [RS:0;27c6fcd7dac8:42095 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:25,078 DEBUG [RS:0;27c6fcd7dac8:42095 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:25,078 DEBUG [RS:0;27c6fcd7dac8:42095 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:25,078 DEBUG [RS:0;27c6fcd7dac8:42095 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:25,078 DEBUG [RS:0;27c6fcd7dac8:42095 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:25,078 DEBUG [RS:0;27c6fcd7dac8:42095 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/27c6fcd7dac8:0, corePoolSize=2, maxPoolSize=2 2024-12-07T12:21:25,078 DEBUG [RS:0;27c6fcd7dac8:42095 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:25,078 DEBUG [RS:0;27c6fcd7dac8:42095 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:25,078 DEBUG [RS:0;27c6fcd7dac8:42095 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:25,079 DEBUG [RS:0;27c6fcd7dac8:42095 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:25,079 DEBUG [RS:0;27c6fcd7dac8:42095 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:25,079 DEBUG [RS:0;27c6fcd7dac8:42095 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:25,079 DEBUG [RS:0;27c6fcd7dac8:42095 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/27c6fcd7dac8:0, corePoolSize=3, maxPoolSize=3 2024-12-07T12:21:25,079 DEBUG [RS:0;27c6fcd7dac8:42095 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0, corePoolSize=3, maxPoolSize=3 2024-12-07T12:21:25,084 INFO [RS:0;27c6fcd7dac8:42095 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-07T12:21:25,084 INFO [RS:0;27c6fcd7dac8:42095 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:25,084 INFO [RS:0;27c6fcd7dac8:42095 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:25,084 INFO [RS:0;27c6fcd7dac8:42095 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:25,084 INFO [RS:0;27c6fcd7dac8:42095 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:25,085 INFO [RS:0;27c6fcd7dac8:42095 {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,42095,1733574084703-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T12:21:25,104 INFO [RS:0;27c6fcd7dac8:42095 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T12:21:25,104 INFO [RS:0;27c6fcd7dac8:42095 {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,42095,1733574084703-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:25,105 INFO [RS:0;27c6fcd7dac8:42095 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:25,105 INFO [RS:0;27c6fcd7dac8:42095 {}] regionserver.Replication(171): 27c6fcd7dac8,42095,1733574084703 started 2024-12-07T12:21:25,120 INFO [RS:0;27c6fcd7dac8:42095 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:25,121 INFO [RS:0;27c6fcd7dac8:42095 {}] regionserver.HRegionServer(1482): Serving as 27c6fcd7dac8,42095,1733574084703, RpcServer on 27c6fcd7dac8/172.17.0.2:42095, sessionid=0x1018cdf32aa0001 2024-12-07T12:21:25,121 DEBUG [RS:0;27c6fcd7dac8:42095 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T12:21:25,121 DEBUG [RS:0;27c6fcd7dac8:42095 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 27c6fcd7dac8,42095,1733574084703 2024-12-07T12:21:25,121 DEBUG [RS:0;27c6fcd7dac8:42095 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '27c6fcd7dac8,42095,1733574084703' 2024-12-07T12:21:25,121 DEBUG [RS:0;27c6fcd7dac8:42095 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T12:21:25,122 DEBUG [RS:0;27c6fcd7dac8:42095 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T12:21:25,122 DEBUG [RS:0;27c6fcd7dac8:42095 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T12:21:25,122 DEBUG [RS:0;27c6fcd7dac8:42095 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T12:21:25,122 DEBUG [RS:0;27c6fcd7dac8:42095 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 27c6fcd7dac8,42095,1733574084703 2024-12-07T12:21:25,122 DEBUG [RS:0;27c6fcd7dac8:42095 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '27c6fcd7dac8,42095,1733574084703' 2024-12-07T12:21:25,122 DEBUG [RS:0;27c6fcd7dac8:42095 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T12:21:25,123 DEBUG 
[RS:0;27c6fcd7dac8:42095 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T12:21:25,123 DEBUG [RS:0;27c6fcd7dac8:42095 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T12:21:25,124 INFO [RS:0;27c6fcd7dac8:42095 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T12:21:25,124 INFO [RS:0;27c6fcd7dac8:42095 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T12:21:25,157 WARN [27c6fcd7dac8:40779 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-07T12:21:25,226 INFO [RS:0;27c6fcd7dac8:42095 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=27c6fcd7dac8%2C42095%2C1733574084703, suffix=, logDir=hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/WALs/27c6fcd7dac8,42095,1733574084703, archiveDir=hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/oldWALs, maxLogs=32 2024-12-07T12:21:25,228 INFO [RS:0;27c6fcd7dac8:42095 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C42095%2C1733574084703.1733574085228 2024-12-07T12:21:25,234 INFO [RS:0;27c6fcd7dac8:42095 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/WALs/27c6fcd7dac8,42095,1733574084703/27c6fcd7dac8%2C42095%2C1733574084703.1733574085228 2024-12-07T12:21:25,235 DEBUG [RS:0;27c6fcd7dac8:42095 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38907:38907),(127.0.0.1/127.0.0.1:36735:36735)] 2024-12-07T12:21:25,408 DEBUG [27c6fcd7dac8:40779 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-07T12:21:25,408 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=27c6fcd7dac8,42095,1733574084703 2024-12-07T12:21:25,410 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 27c6fcd7dac8,42095,1733574084703, state=OPENING 2024-12-07T12:21:25,412 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T12:21:25,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:25,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42095-0x1018cdf32aa0001, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:25,414 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:21:25,414 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T12:21:25,415 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:21:25,415 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=27c6fcd7dac8,42095,1733574084703}] 2024-12-07T12:21:25,569 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T12:21:25,572 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45999, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T12:21:25,577 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-07T12:21:25,577 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T12:21:25,579 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=27c6fcd7dac8%2C42095%2C1733574084703.meta, suffix=.meta, logDir=hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/WALs/27c6fcd7dac8,42095,1733574084703, archiveDir=hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/oldWALs, maxLogs=32 2024-12-07T12:21:25,581 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C42095%2C1733574084703.meta.1733574085581.meta 2024-12-07T12:21:25,589 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/WALs/27c6fcd7dac8,42095,1733574084703/27c6fcd7dac8%2C42095%2C1733574084703.meta.1733574085581.meta 2024-12-07T12:21:25,590 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36735:36735),(127.0.0.1/127.0.0.1:38907:38907)] 2024-12-07T12:21:25,591 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:21:25,591 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T12:21:25,592 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T12:21:25,592 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-07T12:21:25,592 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T12:21:25,592 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:21:25,592 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-07T12:21:25,592 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-07T12:21:25,594 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T12:21:25,595 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T12:21:25,595 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:25,596 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:21:25,596 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T12:21:25,597 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T12:21:25,597 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:25,598 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:21:25,598 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T12:21:25,599 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T12:21:25,599 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:25,600 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:21:25,600 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T12:21:25,601 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T12:21:25,601 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:25,602 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-07T12:21:25,602 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T12:21:25,603 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/data/hbase/meta/1588230740 2024-12-07T12:21:25,605 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/data/hbase/meta/1588230740 2024-12-07T12:21:25,607 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T12:21:25,607 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T12:21:25,608 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T12:21:25,609 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T12:21:25,610 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=809351, jitterRate=0.02914382517337799}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T12:21:25,610 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-07T12:21:25,611 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733574085592Writing region info on filesystem at 1733574085592Initializing all the Stores at 1733574085593 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574085593Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574085594 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574085594Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574085594Cleaning up temporary data from old regions at 1733574085607 (+13 ms)Running coprocessor post-open hooks at 1733574085610 (+3 ms)Region opened successfully at 1733574085611 (+1 ms) 2024-12-07T12:21:25,613 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733574085568 2024-12-07T12:21:25,616 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T12:21:25,616 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-07T12:21:25,617 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=27c6fcd7dac8,42095,1733574084703 2024-12-07T12:21:25,618 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 27c6fcd7dac8,42095,1733574084703, state=OPEN 2024-12-07T12:21:25,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T12:21:25,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42095-0x1018cdf32aa0001, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T12:21:25,624 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=27c6fcd7dac8,42095,1733574084703 2024-12-07T12:21:25,624 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:21:25,624 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:21:25,628 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-07T12:21:25,628 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=27c6fcd7dac8,42095,1733574084703 in 209 msec 2024-12-07T12:21:25,632 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-07T12:21:25,632 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 625 msec 2024-12-07T12:21:25,633 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T12:21:25,633 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-07T12:21:25,635 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T12:21:25,635 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=27c6fcd7dac8,42095,1733574084703, seqNum=-1] 2024-12-07T12:21:25,635 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T12:21:25,637 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47019, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T12:21:25,645 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 696 msec 2024-12-07T12:21:25,645 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733574085645, completionTime=-1 2024-12-07T12:21:25,645 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-07T12:21:25,645 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-07T12:21:25,647 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-07T12:21:25,647 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733574145647 2024-12-07T12:21:25,647 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733574205647 2024-12-07T12:21:25,647 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-07T12:21:25,648 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,40779,1733574084653-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:25,648 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,40779,1733574084653-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:25,648 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,40779,1733574084653-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:25,648 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-27c6fcd7dac8:40779, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T12:21:25,648 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:25,648 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:25,650 DEBUG [master/27c6fcd7dac8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-07T12:21:25,653 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.911sec 2024-12-07T12:21:25,653 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T12:21:25,653 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T12:21:25,653 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T12:21:25,653 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-07T12:21:25,653 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T12:21:25,653 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,40779,1733574084653-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T12:21:25,653 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,40779,1733574084653-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T12:21:25,656 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-07T12:21:25,656 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T12:21:25,656 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,40779,1733574084653-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T12:21:25,723 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d607c51, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T12:21:25,723 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 27c6fcd7dac8,40779,-1 for getting cluster id 2024-12-07T12:21:25,723 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-07T12:21:25,725 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dade928d-b7af-480d-9d71-91a43fb1e49f' 2024-12-07T12:21:25,726 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-07T12:21:25,726 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dade928d-b7af-480d-9d71-91a43fb1e49f" 2024-12-07T12:21:25,726 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7047af98, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T12:21:25,726 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [27c6fcd7dac8,40779,-1] 2024-12-07T12:21:25,727 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-07T12:21:25,727 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:21:25,728 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33952, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-07T12:21:25,729 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b9ee8da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T12:21:25,730 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T12:21:25,731 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=27c6fcd7dac8,42095,1733574084703, seqNum=-1] 2024-12-07T12:21:25,731 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T12:21:25,733 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60442, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T12:21:25,735 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=27c6fcd7dac8,40779,1733574084653 2024-12-07T12:21:25,736 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:21:25,739 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-07T12:21:25,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-07T12:21:25,740 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-07T12:21:25,740 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T12:21:25,740 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:21:25,740 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:21:25,740 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-07T12:21:25,740 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-07T12:21:25,740 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=215330652, stopped=false 2024-12-07T12:21:25,740 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=27c6fcd7dac8,40779,1733574084653 2024-12-07T12:21:25,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T12:21:25,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42095-0x1018cdf32aa0001, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T12:21:25,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:25,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42095-0x1018cdf32aa0001, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:25,742 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T12:21:25,743 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
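The call stacks above show where this shutdown originates: AbstractTestLogRolling.tearDown() calls HBaseTestingUtil.shutdownMiniCluster(), which closes the shared async connection and then stops the JVM-local cluster. A minimal sketch of that JUnit lifecycle follows, assuming only the startMiniCluster()/shutdownMiniCluster() pair visible in the stack traces; the class and test names are placeholders.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class MiniClusterLifecycleSketch {
      private final HBaseTestingUtil util = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        util.startMiniCluster();        // in-process ZooKeeper, HDFS and HBase
      }

      @After
      public void tearDown() throws Exception {
        util.shutdownMiniCluster();     // produces the "Shutting down minicluster" sequence logged above
      }

      @Test
      public void testSomething() throws Exception {
        // test body elided
      }
    }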
2024-12-07T12:21:25,743 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T12:21:25,743 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42095-0x1018cdf32aa0001, quorum=127.0.0.1:54849, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:21:25,743 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:21:25,743 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:21:25,743 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '27c6fcd7dac8,42095,1733574084703' ***** 2024-12-07T12:21:25,743 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T12:21:25,744 INFO [RS:0;27c6fcd7dac8:42095 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T12:21:25,744 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T12:21:25,744 INFO [RS:0;27c6fcd7dac8:42095 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T12:21:25,744 INFO [RS:0;27c6fcd7dac8:42095 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T12:21:25,744 INFO [RS:0;27c6fcd7dac8:42095 {}] regionserver.HRegionServer(959): stopping server 27c6fcd7dac8,42095,1733574084703 2024-12-07T12:21:25,744 INFO [RS:0;27c6fcd7dac8:42095 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T12:21:25,744 INFO [RS:0;27c6fcd7dac8:42095 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;27c6fcd7dac8:42095. 2024-12-07T12:21:25,744 DEBUG [RS:0;27c6fcd7dac8:42095 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T12:21:25,745 DEBUG [RS:0;27c6fcd7dac8:42095 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:21:25,745 INFO [RS:0;27c6fcd7dac8:42095 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
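Here region server 27c6fcd7dac8,42095 begins its orderly stop: heap memory manager, flush and snapshot procedure managers, and its own cluster connection are shut down before any region is closed. A test can drive the same path for a single server through the mini cluster handle; the sketch below assumes SingleProcessHBaseCluster keeps the stopRegionServer/waitOnRegionServer methods of the older MiniHBaseCluster and that HBaseTestingUtil exposes getMiniHBaseCluster().

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.SingleProcessHBaseCluster;

    public final class StopRegionServerSketch {
      static void stopFirstRegionServer(HBaseTestingUtil util) throws Exception {
        SingleProcessHBaseCluster cluster = util.getMiniHBaseCluster();
        cluster.stopRegionServer(0);    // requests shutdown of region server #0 ("***** STOPPING region server ...")
        cluster.waitOnRegionServer(0);  // block until its threads have exited
      }
    }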
2024-12-07T12:21:25,745 INFO [RS:0;27c6fcd7dac8:42095 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T12:21:25,745 INFO [RS:0;27c6fcd7dac8:42095 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T12:21:25,745 INFO [RS:0;27c6fcd7dac8:42095 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-07T12:21:25,745 INFO [RS:0;27c6fcd7dac8:42095 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-07T12:21:25,746 DEBUG [RS:0;27c6fcd7dac8:42095 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-07T12:21:25,746 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T12:21:25,746 DEBUG [RS:0;27c6fcd7dac8:42095 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-07T12:21:25,746 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T12:21:25,746 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T12:21:25,746 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T12:21:25,746 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T12:21:25,746 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-07T12:21:25,765 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/data/hbase/meta/1588230740/.tmp/ns/3f088b63aee34effb92cecbf1435a5c4 is 43, key is default/ns:d/1733574085638/Put/seqid=0 2024-12-07T12:21:25,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36455 is added to blk_1073741835_1011 (size=5153) 2024-12-07T12:21:25,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741835_1011 (size=5153) 2024-12-07T12:21:25,772 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/data/hbase/meta/1588230740/.tmp/ns/3f088b63aee34effb92cecbf1435a5c4 2024-12-07T12:21:25,780 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/data/hbase/meta/1588230740/.tmp/ns/3f088b63aee34effb92cecbf1435a5c4 as hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/data/hbase/meta/1588230740/ns/3f088b63aee34effb92cecbf1435a5c4 2024-12-07T12:21:25,787 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/data/hbase/meta/1588230740/ns/3f088b63aee34effb92cecbf1435a5c4, entries=2, sequenceid=6, filesize=5.0 K 2024-12-07T12:21:25,789 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 43ms, sequenceid=6, compaction requested=false 2024-12-07T12:21:25,794 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T12:21:25,795 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T12:21:25,795 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T12:21:25,795 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733574085746Running coprocessor pre-close hooks at 1733574085746Disabling compacts and flushes for region at 1733574085746Disabling writes for close at 1733574085746Obtaining lock to block concurrent updates at 1733574085746Preparing flush snapshotting stores in 1588230740 at 1733574085746Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733574085746Flushing stores of hbase:meta,,1.1588230740 at 1733574085747 (+1 ms)Flushing 1588230740/ns: creating writer at 1733574085748 (+1 ms)Flushing 1588230740/ns: appending metadata at 1733574085765 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1733574085765Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1cccbdbf: reopening flushed file at 1733574085779 (+14 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 43ms, sequenceid=6, compaction requested=false at 1733574085789 (+10 ms)Writing region close event to WAL at 1733574085790 (+1 ms)Running coprocessor post-close hooks at 1733574085795 (+5 ms)Closed at 1733574085795 2024-12-07T12:21:25,795 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T12:21:25,946 INFO [RS:0;27c6fcd7dac8:42095 {}] regionserver.HRegionServer(976): stopping server 27c6fcd7dac8,42095,1733574084703; all regions closed. 
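The close of hbase:meta above first flushes its small memstore (74 B) to a temporary HFile under .tmp/ns, commits that file into the ns family directory, and only then writes recovered.edits/9.seqid and releases the region. The same flush can be requested explicitly through the Admin API; a sketch, assuming a running HBaseTestingUtil instance and its getConnection() accessor.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public final class FlushMetaSketch {
      static void flushMeta(HBaseTestingUtil util) throws Exception {
        try (Admin admin = util.getConnection().getAdmin()) {
          // Flushes the memstores of hbase:meta, the same work HRegion(2902)/DefaultStoreFlusher(81) log above.
          admin.flush(TableName.META_TABLE_NAME);
        }
      }
    }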
2024-12-07T12:21:25,947 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:25,947 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:25,947 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:25,947 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:25,947 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:25,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741834_1010 (size=1152) 2024-12-07T12:21:25,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36455 is added to blk_1073741834_1010 (size=1152) 2024-12-07T12:21:25,953 DEBUG [RS:0;27c6fcd7dac8:42095 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/oldWALs 2024-12-07T12:21:25,953 INFO [RS:0;27c6fcd7dac8:42095 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 27c6fcd7dac8%2C42095%2C1733574084703.meta:.meta(num 1733574085581) 2024-12-07T12:21:25,953 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:25,954 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:25,954 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:25,954 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:25,954 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:25,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36455 is added to blk_1073741833_1009 (size=93) 2024-12-07T12:21:25,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741833_1009 (size=93) 2024-12-07T12:21:25,959 DEBUG [RS:0;27c6fcd7dac8:42095 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/oldWALs 2024-12-07T12:21:25,959 INFO [RS:0;27c6fcd7dac8:42095 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 27c6fcd7dac8%2C42095%2C1733574084703:(num 1733574085228) 2024-12-07T12:21:25,959 DEBUG [RS:0;27c6fcd7dac8:42095 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:21:25,959 INFO [RS:0;27c6fcd7dac8:42095 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T12:21:25,959 INFO [RS:0;27c6fcd7dac8:42095 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T12:21:25,960 INFO [RS:0;27c6fcd7dac8:42095 {}] hbase.ChoreService(370): Chore service for: regionserver/27c6fcd7dac8:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T12:21:25,960 INFO [RS:0;27c6fcd7dac8:42095 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T12:21:25,960 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
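At this point both of the server's WALs (the .meta WAL and the default one) have been closed and their files moved into the shared oldWALs directory. A test can force an equivalent roll of the active WAL while the server is still running; a sketch using Admin.rollWALWriter, again assuming the HBaseTestingUtil and SingleProcessHBaseCluster accessors named here.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;

    public final class RollWalSketch {
      static void rollFirstServerWal(HBaseTestingUtil util) throws Exception {
        ServerName rs = util.getMiniHBaseCluster().getRegionServer(0).getServerName();
        try (Admin admin = util.getConnection().getAdmin()) {
          admin.rollWALWriter(rs);   // ask the region server to close its current WAL and start a new one
        }
      }
    }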
2024-12-07T12:21:25,960 INFO [RS:0;27c6fcd7dac8:42095 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42095 2024-12-07T12:21:25,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42095-0x1018cdf32aa0001, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/27c6fcd7dac8,42095,1733574084703 2024-12-07T12:21:25,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T12:21:25,963 INFO [RS:0;27c6fcd7dac8:42095 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T12:21:25,965 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [27c6fcd7dac8,42095,1733574084703] 2024-12-07T12:21:25,966 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/27c6fcd7dac8,42095,1733574084703 already deleted, retry=false 2024-12-07T12:21:25,966 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 27c6fcd7dac8,42095,1733574084703 expired; onlineServers=0 2024-12-07T12:21:25,966 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '27c6fcd7dac8,40779,1733574084653' ***** 2024-12-07T12:21:25,966 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-07T12:21:25,966 INFO [M:0;27c6fcd7dac8:40779 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T12:21:25,966 INFO [M:0;27c6fcd7dac8:40779 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T12:21:25,966 DEBUG [M:0;27c6fcd7dac8:40779 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-07T12:21:25,967 DEBUG [M:0;27c6fcd7dac8:40779 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-07T12:21:25,967 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-07T12:21:25,967 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.small.0-1733574084956 {}] cleaner.HFileCleaner(306): Exit Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.small.0-1733574084956,5,FailOnTimeoutGroup] 2024-12-07T12:21:25,967 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.large.0-1733574084956 {}] cleaner.HFileCleaner(306): Exit Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.large.0-1733574084956,5,FailOnTimeoutGroup] 2024-12-07T12:21:25,967 INFO [M:0;27c6fcd7dac8:40779 {}] hbase.ChoreService(370): Chore service for: master/27c6fcd7dac8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-07T12:21:25,967 INFO [M:0;27c6fcd7dac8:40779 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T12:21:25,967 DEBUG [M:0;27c6fcd7dac8:40779 {}] master.HMaster(1795): Stopping service threads 2024-12-07T12:21:25,967 INFO [M:0;27c6fcd7dac8:40779 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-07T12:21:25,967 INFO [M:0;27c6fcd7dac8:40779 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T12:21:25,967 INFO [M:0;27c6fcd7dac8:40779 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-07T12:21:25,967 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-07T12:21:25,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T12:21:25,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:25,968 DEBUG [M:0;27c6fcd7dac8:40779 {}] zookeeper.ZKUtil(347): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T12:21:25,969 WARN [M:0;27c6fcd7dac8:40779 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T12:21:25,969 INFO [M:0;27c6fcd7dac8:40779 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/.lastflushedseqids 2024-12-07T12:21:25,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741836_1012 (size=99) 2024-12-07T12:21:25,976 INFO [M:0;27c6fcd7dac8:40779 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-07T12:21:25,977 INFO [M:0;27c6fcd7dac8:40779 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-07T12:21:25,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36455 is added to blk_1073741836_1012 (size=99) 2024-12-07T12:21:25,977 DEBUG [M:0;27c6fcd7dac8:40779 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T12:21:25,977 INFO [M:0;27c6fcd7dac8:40779 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:21:25,977 DEBUG [M:0;27c6fcd7dac8:40779 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:21:25,977 DEBUG [M:0;27c6fcd7dac8:40779 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T12:21:25,977 DEBUG [M:0;27c6fcd7dac8:40779 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:21:25,977 INFO [M:0;27c6fcd7dac8:40779 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-07T12:21:26,001 DEBUG [M:0;27c6fcd7dac8:40779 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e34d80022658479a8549a00efc614633 is 82, key is hbase:meta,,1/info:regioninfo/1733574085617/Put/seqid=0 2024-12-07T12:21:26,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741837_1013 (size=5672) 2024-12-07T12:21:26,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36455 is added to blk_1073741837_1013 (size=5672) 2024-12-07T12:21:26,008 INFO [M:0;27c6fcd7dac8:40779 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e34d80022658479a8549a00efc614633 2024-12-07T12:21:26,036 DEBUG [M:0;27c6fcd7dac8:40779 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ef35df75043a42808518002f250d64e9 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733574085644/Put/seqid=0 2024-12-07T12:21:26,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36455 is added to blk_1073741838_1014 (size=5275) 2024-12-07T12:21:26,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741838_1014 (size=5275) 2024-12-07T12:21:26,043 INFO [M:0;27c6fcd7dac8:40779 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ef35df75043a42808518002f250d64e9 2024-12-07T12:21:26,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42095-0x1018cdf32aa0001, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:21:26,065 INFO [RS:0;27c6fcd7dac8:42095 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T12:21:26,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42095-0x1018cdf32aa0001, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 
2024-12-07T12:21:26,065 INFO [RS:0;27c6fcd7dac8:42095 {}] regionserver.HRegionServer(1031): Exiting; stopping=27c6fcd7dac8,42095,1733574084703; zookeeper connection closed. 2024-12-07T12:21:26,065 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2f4a5e9 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2f4a5e9 2024-12-07T12:21:26,066 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-07T12:21:26,073 DEBUG [M:0;27c6fcd7dac8:40779 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f25f14690a574162b7d21f594b74657f is 69, key is 27c6fcd7dac8,42095,1733574084703/rs:state/1733574085054/Put/seqid=0 2024-12-07T12:21:26,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36455 is added to blk_1073741839_1015 (size=5156) 2024-12-07T12:21:26,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741839_1015 (size=5156) 2024-12-07T12:21:26,080 INFO [M:0;27c6fcd7dac8:40779 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f25f14690a574162b7d21f594b74657f 2024-12-07T12:21:26,111 DEBUG [M:0;27c6fcd7dac8:40779 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5f0abd30a2be4dcb85af93707c605967 is 52, key is load_balancer_on/state:d/1733574085738/Put/seqid=0 2024-12-07T12:21:26,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36455 is added to blk_1073741840_1016 (size=5056) 2024-12-07T12:21:26,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741840_1016 (size=5056) 2024-12-07T12:21:26,119 INFO [M:0;27c6fcd7dac8:40779 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5f0abd30a2be4dcb85af93707c605967 2024-12-07T12:21:26,127 DEBUG [M:0;27c6fcd7dac8:40779 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e34d80022658479a8549a00efc614633 as hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e34d80022658479a8549a00efc614633 2024-12-07T12:21:26,134 INFO [M:0;27c6fcd7dac8:40779 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e34d80022658479a8549a00efc614633, entries=8, sequenceid=29, filesize=5.5 K 2024-12-07T12:21:26,135 DEBUG 
[M:0;27c6fcd7dac8:40779 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ef35df75043a42808518002f250d64e9 as hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ef35df75043a42808518002f250d64e9 2024-12-07T12:21:26,142 INFO [M:0;27c6fcd7dac8:40779 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ef35df75043a42808518002f250d64e9, entries=3, sequenceid=29, filesize=5.2 K 2024-12-07T12:21:26,143 DEBUG [M:0;27c6fcd7dac8:40779 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f25f14690a574162b7d21f594b74657f as hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f25f14690a574162b7d21f594b74657f 2024-12-07T12:21:26,150 INFO [M:0;27c6fcd7dac8:40779 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f25f14690a574162b7d21f594b74657f, entries=1, sequenceid=29, filesize=5.0 K 2024-12-07T12:21:26,151 DEBUG [M:0;27c6fcd7dac8:40779 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5f0abd30a2be4dcb85af93707c605967 as hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5f0abd30a2be4dcb85af93707c605967 2024-12-07T12:21:26,158 INFO [M:0;27c6fcd7dac8:40779 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36683/user/jenkins/test-data/08eb1668-0adf-6410-7d37-8110c0a25e68/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5f0abd30a2be4dcb85af93707c605967, entries=1, sequenceid=29, filesize=4.9 K 2024-12-07T12:21:26,160 INFO [M:0;27c6fcd7dac8:40779 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 183ms, sequenceid=29, compaction requested=false 2024-12-07T12:21:26,168 INFO [M:0;27c6fcd7dac8:40779 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:21:26,168 DEBUG [M:0;27c6fcd7dac8:40779 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733574085977Disabling compacts and flushes for region at 1733574085977Disabling writes for close at 1733574085977Obtaining lock to block concurrent updates at 1733574085977Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733574085977Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733574085978 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733574085978Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733574085979 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733574086000 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733574086000Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733574086016 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733574086035 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733574086035Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733574086050 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733574086072 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733574086073 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733574086087 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733574086110 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733574086110Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7426b400: reopening flushed file at 1733574086125 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@61f358e3: reopening flushed file at 1733574086134 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7f3db5e: reopening flushed file at 1733574086142 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c34f967: reopening flushed file at 1733574086150 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 183ms, sequenceid=29, compaction requested=false at 1733574086160 (+10 ms)Writing region close event to WAL at 1733574086168 (+8 ms)Closed at 1733574086168 2024-12-07T12:21:26,169 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:26,169 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:26,169 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:26,169 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:26,169 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:26,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741830_1006 (size=10311) 2024-12-07T12:21:26,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36455 is added to blk_1073741830_1006 (size=10311) 2024-12-07T12:21:26,184 INFO [M:0;27c6fcd7dac8:40779 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-07T12:21:26,184 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T12:21:26,185 INFO [M:0;27c6fcd7dac8:40779 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40779 2024-12-07T12:21:26,185 INFO [M:0;27c6fcd7dac8:40779 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T12:21:26,287 INFO [M:0;27c6fcd7dac8:40779 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T12:21:26,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:21:26,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40779-0x1018cdf32aa0000, quorum=127.0.0.1:54849, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:21:26,291 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@af33574{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:21:26,292 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@17d00685{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:21:26,292 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:21:26,292 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6082dc4f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:21:26,292 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1410bc86{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/hadoop.log.dir/,STOPPED} 2024-12-07T12:21:26,295 WARN [BP-1911068295-172.17.0.2-1733574083925 heartbeating to localhost/127.0.0.1:36683 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T12:21:26,295 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
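The warnings above and below are the datanodes of the first MiniDFSCluster shutting down: BPServiceActor and its command processor exit, and the disk-usage refresh threads are interrupted. For a WAL test like TestLogRolling.testLogRollOnDatanodeDeath, the interesting variant is killing a datanode while the cluster is live rather than at teardown; the sketch below shows one way to do that with the MiniDFSCluster handle, assuming HBaseTestingUtil still exposes getDFSCluster() as the older test utility did.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;

    public final class DatanodeDeathSketch {
      static void bounceDataNode(HBaseTestingUtil util) throws Exception {
        MiniDFSCluster dfs = util.getDFSCluster();
        DataNodeProperties dn = dfs.stopDataNode(0);   // simulate datanode death; the WAL pipeline must recover
        // ... write to a table and roll the WAL here to exercise the recovery path ...
        dfs.restartDataNode(dn, true);                 // bring the datanode back on the same port
        dfs.waitActive();
      }
    }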
2024-12-07T12:21:26,296 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T12:21:26,296 WARN [BP-1911068295-172.17.0.2-1733574083925 heartbeating to localhost/127.0.0.1:36683 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1911068295-172.17.0.2-1733574083925 (Datanode Uuid a22ce972-bbb9-4038-99a5-7de959bc5a3b) service to localhost/127.0.0.1:36683 2024-12-07T12:21:26,296 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/cluster_62c3eae8-243c-2e68-7096-7d71be6e8b40/data/data3/current/BP-1911068295-172.17.0.2-1733574083925 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:21:26,297 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/cluster_62c3eae8-243c-2e68-7096-7d71be6e8b40/data/data4/current/BP-1911068295-172.17.0.2-1733574083925 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:21:26,297 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T12:21:26,300 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@36505daf{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:21:26,300 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@21e00560{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:21:26,300 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:21:26,300 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61783b0f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:21:26,300 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@779c0b56{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/hadoop.log.dir/,STOPPED} 2024-12-07T12:21:26,302 WARN [BP-1911068295-172.17.0.2-1733574083925 heartbeating to localhost/127.0.0.1:36683 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T12:21:26,303 WARN [BP-1911068295-172.17.0.2-1733574083925 heartbeating to localhost/127.0.0.1:36683 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1911068295-172.17.0.2-1733574083925 (Datanode Uuid 6cd48622-6547-47b4-9e9e-bdb487a10b31) service to localhost/127.0.0.1:36683 2024-12-07T12:21:26,303 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/cluster_62c3eae8-243c-2e68-7096-7d71be6e8b40/data/data1/current/BP-1911068295-172.17.0.2-1733574083925 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 
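Once the first cluster is fully down, the harness immediately starts a fresh one under a new test-data directory; the StartMiniClusterOption dump in the following lines (numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1) describes what is being requested. A sketch of the equivalent call, assuming the builder fields shown in that dump.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public final class StartOptionSketch {
      static HBaseTestingUtil startSmallCluster() throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)             // two datanodes, as in the option dump below
            .build();
        util.startMiniCluster(option);   // logs "Starting up minicluster with option: ..."
        return util;
      }
    }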
2024-12-07T12:21:26,303 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/cluster_62c3eae8-243c-2e68-7096-7d71be6e8b40/data/data2/current/BP-1911068295-172.17.0.2-1733574083925 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:21:26,304 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-07T12:21:26,304 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T12:21:26,304 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T12:21:26,310 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@55cb1221{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T12:21:26,311 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@542ee468{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:21:26,311 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:21:26,311 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@737d6115{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:21:26,311 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54b8bf96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/hadoop.log.dir/,STOPPED} 2024-12-07T12:21:26,318 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-07T12:21:26,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-07T12:21:26,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-07T12:21:26,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/hadoop.log.dir so I do NOT create it in target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6 2024-12-07T12:21:26,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bfad2c6f-fe6b-5a21-21ae-793f9924e8e4/hadoop.tmp.dir so I do NOT create it in target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6 2024-12-07T12:21:26,339 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264, deleteOnExit=true 2024-12-07T12:21:26,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-07T12:21:26,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/test.cache.data in system properties and HBase conf 2024-12-07T12:21:26,340 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T12:21:26,340 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/hadoop.log.dir in system properties and HBase conf 2024-12-07T12:21:26,340 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T12:21:26,340 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T12:21:26,340 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-07T12:21:26,340 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-07T12:21:26,340 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T12:21:26,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T12:21:26,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T12:21:26,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T12:21:26,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T12:21:26,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T12:21:26,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T12:21:26,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T12:21:26,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T12:21:26,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/nfs.dump.dir in system properties and HBase conf 2024-12-07T12:21:26,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/java.io.tmpdir in system properties and HBase conf 2024-12-07T12:21:26,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T12:21:26,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T12:21:26,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T12:21:26,362 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-07T12:21:26,457 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:21:26,464 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:21:26,469 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:21:26,469 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:21:26,469 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T12:21:26,473 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:21:26,473 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24befc55{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:21:26,474 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@f1f9cf1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:21:26,629 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@35d13a28{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/java.io.tmpdir/jetty-localhost-40557-hadoop-hdfs-3_4_1-tests_jar-_-any-15585449228540133073/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T12:21:26,630 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1d790455{HTTP/1.1, (http/1.1)}{localhost:40557} 2024-12-07T12:21:26,630 INFO [Time-limited test {}] server.Server(415): Started @104048ms 2024-12-07T12:21:26,649 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-07T12:21:26,733 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:21:26,736 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:21:26,738 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:21:26,738 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:21:26,738 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T12:21:26,741 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22a56d6d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:21:26,741 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57f0db4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:21:26,871 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@55cdd36{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/java.io.tmpdir/jetty-localhost-44287-hadoop-hdfs-3_4_1-tests_jar-_-any-1788312708506291057/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:21:26,872 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1bb5d847{HTTP/1.1, (http/1.1)}{localhost:44287} 2024-12-07T12:21:26,872 INFO [Time-limited test {}] server.Server(415): Started @104289ms 2024-12-07T12:21:26,874 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T12:21:26,937 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:21:26,945 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:21:26,950 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:21:26,950 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:21:26,950 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T12:21:26,952 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@63af9ad6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:21:26,953 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1e0a663b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:21:27,023 WARN [Thread-658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data1/current/BP-1950294605-172.17.0.2-1733574086387/current, will proceed with Du for space computation calculation, 2024-12-07T12:21:27,025 WARN [Thread-659 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data2/current/BP-1950294605-172.17.0.2-1733574086387/current, will proceed with Du for space computation calculation, 2024-12-07T12:21:27,060 WARN [Thread-637 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T12:21:27,063 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb4aec7886d392ce4 with lease ID 0xbd7254818a6a002b: Processing first storage report for DS-e767951a-f65f-4302-8516-77a19232ec3c from datanode DatanodeRegistration(127.0.0.1:38045, datanodeUuid=ab2838e6-92f4-4080-aba6-2942391d8a34, infoPort=44953, infoSecurePort=0, ipcPort=35217, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387) 2024-12-07T12:21:27,064 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb4aec7886d392ce4 with lease ID 0xbd7254818a6a002b: from storage DS-e767951a-f65f-4302-8516-77a19232ec3c node DatanodeRegistration(127.0.0.1:38045, datanodeUuid=ab2838e6-92f4-4080-aba6-2942391d8a34, infoPort=44953, infoSecurePort=0, ipcPort=35217, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T12:21:27,064 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb4aec7886d392ce4 with lease ID 0xbd7254818a6a002b: Processing first storage report for DS-c8936eb4-b1ff-422d-a328-ff0409ee7660 from datanode DatanodeRegistration(127.0.0.1:38045, datanodeUuid=ab2838e6-92f4-4080-aba6-2942391d8a34, infoPort=44953, infoSecurePort=0, ipcPort=35217, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387) 2024-12-07T12:21:27,064 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb4aec7886d392ce4 with lease ID 0xbd7254818a6a002b: from storage DS-c8936eb4-b1ff-422d-a328-ff0409ee7660 node DatanodeRegistration(127.0.0.1:38045, datanodeUuid=ab2838e6-92f4-4080-aba6-2942391d8a34, infoPort=44953, infoSecurePort=0, ipcPort=35217, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:21:27,085 INFO [regionserver/27c6fcd7dac8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T12:21:27,104 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@53c8d161{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/java.io.tmpdir/jetty-localhost-36729-hadoop-hdfs-3_4_1-tests_jar-_-any-4495472142174633642/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:21:27,104 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4bb19ef9{HTTP/1.1, (http/1.1)}{localhost:36729} 2024-12-07T12:21:27,104 INFO [Time-limited test {}] server.Server(415): Started @104522ms 2024-12-07T12:21:27,106 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
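
[Aside, not part of the captured log] The entries above record HBaseTestingUtil bringing up an in-process HDFS (Jetty endpoints for the NameNode and two DataNodes, first block/storage reports) ahead of ZooKeeper and HBase itself. Purely as a reference point, a minimal sketch of how a test might drive that same utility follows; it assumes HBaseTestingUtil exposes startMiniCluster()/shutdownMiniCluster()/createTable() as the older HBaseTestingUtility did, and it is not taken from the test that produced this output.

    // Hypothetical sketch only: drives the same test utility whose startup the
    // surrounding log records. Method names are assumed from the public testing
    // API, not extracted from this run.
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Spins up mini DFS, mini ZooKeeper and an HBase master + regionserver
        // in-process, emitting startup output similar to the entries above.
        util.startMiniCluster();
        try {
          // Example interaction: create a throwaway table and obtain a handle to it.
          Table t = util.createTable(TableName.valueOf("sketch"), Bytes.toBytes("cf"));
          t.close();
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }
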
2024-12-07T12:21:27,217 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data3/current/BP-1950294605-172.17.0.2-1733574086387/current, will proceed with Du for space computation calculation, 2024-12-07T12:21:27,217 WARN [Thread-685 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data4/current/BP-1950294605-172.17.0.2-1733574086387/current, will proceed with Du for space computation calculation, 2024-12-07T12:21:27,244 WARN [Thread-673 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T12:21:27,247 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5e3b0ff9e6ede4a8 with lease ID 0xbd7254818a6a002c: Processing first storage report for DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb from datanode DatanodeRegistration(127.0.0.1:42901, datanodeUuid=92cd3207-d7db-45e7-bcec-7240b42e4324, infoPort=46011, infoSecurePort=0, ipcPort=35111, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387) 2024-12-07T12:21:27,247 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5e3b0ff9e6ede4a8 with lease ID 0xbd7254818a6a002c: from storage DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb node DatanodeRegistration(127.0.0.1:42901, datanodeUuid=92cd3207-d7db-45e7-bcec-7240b42e4324, infoPort=46011, infoSecurePort=0, ipcPort=35111, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:21:27,247 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5e3b0ff9e6ede4a8 with lease ID 0xbd7254818a6a002c: Processing first storage report for DS-ceee7fcf-0713-4f8f-8cd6-4526b75824fc from datanode DatanodeRegistration(127.0.0.1:42901, datanodeUuid=92cd3207-d7db-45e7-bcec-7240b42e4324, infoPort=46011, infoSecurePort=0, ipcPort=35111, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387) 2024-12-07T12:21:27,247 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5e3b0ff9e6ede4a8 with lease ID 0xbd7254818a6a002c: from storage DS-ceee7fcf-0713-4f8f-8cd6-4526b75824fc node DatanodeRegistration(127.0.0.1:42901, datanodeUuid=92cd3207-d7db-45e7-bcec-7240b42e4324, infoPort=46011, infoSecurePort=0, ipcPort=35111, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:21:27,349 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6 2024-12-07T12:21:27,361 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/zookeeper_0, clientPort=62922, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-07T12:21:27,362 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62922 2024-12-07T12:21:27,362 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:21:27,365 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:21:27,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38045 is added to blk_1073741825_1001 (size=7) 2024-12-07T12:21:27,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42901 is added to blk_1073741825_1001 (size=7) 2024-12-07T12:21:27,389 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3 with version=8 2024-12-07T12:21:27,389 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/hbase-staging 2024-12-07T12:21:27,391 INFO [Time-limited test {}] client.ConnectionUtils(128): master/27c6fcd7dac8:0 server-side Connection retries=45 2024-12-07T12:21:27,391 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:21:27,392 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T12:21:27,392 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T12:21:27,392 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:21:27,392 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T12:21:27,392 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-07T12:21:27,392 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T12:21:27,393 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43989 2024-12-07T12:21:27,394 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43989 connecting to ZooKeeper ensemble=127.0.0.1:62922 2024-12-07T12:21:27,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:439890x0, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T12:21:27,402 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43989-0x1018cdf3d480000 connected 2024-12-07T12:21:27,435 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:21:27,437 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:21:27,441 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:21:27,441 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3, hbase.cluster.distributed=false 2024-12-07T12:21:27,443 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T12:21:27,448 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43989 2024-12-07T12:21:27,449 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43989 2024-12-07T12:21:27,449 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43989 2024-12-07T12:21:27,453 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43989 2024-12-07T12:21:27,453 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43989 2024-12-07T12:21:27,478 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/27c6fcd7dac8:0 server-side Connection retries=45 2024-12-07T12:21:27,479 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:21:27,479 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T12:21:27,479 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T12:21:27,479 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:21:27,479 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T12:21:27,479 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T12:21:27,479 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T12:21:27,480 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46855 2024-12-07T12:21:27,481 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46855 connecting to ZooKeeper ensemble=127.0.0.1:62922 2024-12-07T12:21:27,482 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:21:27,486 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:21:27,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:468550x0, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T12:21:27,494 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:468550x0, quorum=127.0.0.1:62922, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:21:27,495 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T12:21:27,496 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46855-0x1018cdf3d480001 connected 2024-12-07T12:21:27,500 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T12:21:27,500 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46855-0x1018cdf3d480001, quorum=127.0.0.1:62922, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T12:21:27,502 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46855-0x1018cdf3d480001, quorum=127.0.0.1:62922, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T12:21:27,504 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46855 2024-12-07T12:21:27,505 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46855 2024-12-07T12:21:27,512 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46855 2024-12-07T12:21:27,513 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46855 2024-12-07T12:21:27,514 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46855 2024-12-07T12:21:27,527 
DEBUG [M:0;27c6fcd7dac8:43989 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;27c6fcd7dac8:43989 2024-12-07T12:21:27,528 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/27c6fcd7dac8,43989,1733574087391 2024-12-07T12:21:27,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46855-0x1018cdf3d480001, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:21:27,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:21:27,533 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/27c6fcd7dac8,43989,1733574087391 2024-12-07T12:21:27,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46855-0x1018cdf3d480001, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T12:21:27,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46855-0x1018cdf3d480001, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:27,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:27,540 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T12:21:27,542 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/27c6fcd7dac8,43989,1733574087391 from backup master directory 2024-12-07T12:21:27,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/27c6fcd7dac8,43989,1733574087391 2024-12-07T12:21:27,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:21:27,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46855-0x1018cdf3d480001, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:21:27,544 WARN [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-07T12:21:27,544 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=27c6fcd7dac8,43989,1733574087391 2024-12-07T12:21:27,553 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/hbase.id] with ID: 653755b3-d875-4f21-b722-31e9305d80af 2024-12-07T12:21:27,553 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/.tmp/hbase.id 2024-12-07T12:21:27,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42901 is added to blk_1073741826_1002 (size=42) 2024-12-07T12:21:27,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38045 is added to blk_1073741826_1002 (size=42) 2024-12-07T12:21:27,568 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/.tmp/hbase.id]:[hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/hbase.id] 2024-12-07T12:21:27,585 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:21:27,585 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-07T12:21:27,587 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
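
[Aside, not part of the captured log] At this point the master has registered itself under /hbase in ZooKeeper (backup-masters znode deleted, active master 27c6fcd7dac8,43989,1733574087391) and written the cluster ID file. As an illustrative aside only, the znodes named above could be inspected with the stock ZooKeeper client; the connect string 127.0.0.1:62922 is the client port reported by MiniZooKeeperCluster earlier, and everything else in the sketch is assumed rather than taken from this run.

    // Hypothetical sketch: peek at the znodes mentioned in the surrounding entries.
    import java.nio.charset.StandardCharsets;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkPeek {
      public static void main(String[] args) throws Exception {
        // Connect to the mini ZooKeeper ensemble reported in the log (clientPort=62922).
        ZooKeeper zk = new ZooKeeper("127.0.0.1:62922", 30000, event -> { });
        try {
          // /hbase/master holds the active master's serialized server name; the payload
          // is protobuf-framed, so raw printing is only a rough sanity check.
          byte[] data = zk.getData("/hbase/master", false, null);
          System.out.println(new String(data, StandardCharsets.UTF_8));
          // /hbase/backup-masters lists any standby masters (empty in this run).
          System.out.println(zk.getChildren("/hbase/backup-masters", false));
        } finally {
          zk.close();
        }
      }
    }
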
2024-12-07T12:21:27,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46855-0x1018cdf3d480001, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:27,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:27,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38045 is added to blk_1073741827_1003 (size=196) 2024-12-07T12:21:27,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42901 is added to blk_1073741827_1003 (size=196) 2024-12-07T12:21:27,615 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T12:21:27,616 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T12:21:27,616 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T12:21:27,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38045 is added to blk_1073741828_1004 (size=1189) 2024-12-07T12:21:27,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42901 is added to blk_1073741828_1004 (size=1189) 2024-12-07T12:21:27,634 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/data/master/store 2024-12-07T12:21:27,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42901 is added to blk_1073741829_1005 (size=34) 2024-12-07T12:21:27,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38045 is added to blk_1073741829_1005 (size=34) 2024-12-07T12:21:28,053 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:21:28,053 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T12:21:28,053 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:21:28,053 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:21:28,053 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T12:21:28,053 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:21:28,053 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
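
[Aside, not part of the captured log] The descriptor dumped above (master:store with the info/proc/rs/state families) is built internally by MasterRegion; the sketch below only illustrates how a descriptor with the same attributes for the 'info' family could be expressed through the public client API, and is not the code path the log is exercising.

    // Hypothetical sketch: express the 'info' family attributes printed in the log
    // (VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks)
    // via the public TableDescriptorBuilder API.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .build())
            .build();
        // toString() prints the same attribute map format seen in the log above.
        System.out.println(td);
      }
    }
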
2024-12-07T12:21:28,053 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733574088053Disabling compacts and flushes for region at 1733574088053Disabling writes for close at 1733574088053Writing region close event to WAL at 1733574088053Closed at 1733574088053 2024-12-07T12:21:28,054 WARN [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/data/master/store/.initializing 2024-12-07T12:21:28,055 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/WALs/27c6fcd7dac8,43989,1733574087391 2024-12-07T12:21:28,058 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=27c6fcd7dac8%2C43989%2C1733574087391, suffix=, logDir=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/WALs/27c6fcd7dac8,43989,1733574087391, archiveDir=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/oldWALs, maxLogs=10 2024-12-07T12:21:28,058 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C43989%2C1733574087391.1733574088058 2024-12-07T12:21:28,065 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/WALs/27c6fcd7dac8,43989,1733574087391/27c6fcd7dac8%2C43989%2C1733574087391.1733574088058 2024-12-07T12:21:28,066 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46011:46011),(127.0.0.1/127.0.0.1:44953:44953)] 2024-12-07T12:21:28,067 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:21:28,067 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:21:28,067 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:21:28,067 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:21:28,069 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:21:28,070 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T12:21:28,071 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:28,071 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:21:28,071 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:21:28,073 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T12:21:28,073 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:28,074 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:21:28,074 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:21:28,075 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T12:21:28,075 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:28,076 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:21:28,076 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:21:28,077 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T12:21:28,077 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:28,078 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:21:28,078 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:21:28,079 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:21:28,079 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:21:28,081 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:21:28,081 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:21:28,081 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T12:21:28,082 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:21:28,085 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:21:28,085 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=786293, jitterRate=-1.77040696144104E-4}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T12:21:28,087 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733574088067Initializing all the Stores at 1733574088068 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574088068Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574088069 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574088069Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574088069Cleaning up temporary data from old regions at 1733574088081 (+12 ms)Region opened successfully at 1733574088087 (+6 ms) 2024-12-07T12:21:28,087 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-07T12:21:28,091 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e2ebfe5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=27c6fcd7dac8/172.17.0.2:0 2024-12-07T12:21:28,092 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-07T12:21:28,092 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T12:21:28,092 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T12:21:28,092 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T12:21:28,093 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-07T12:21:28,093 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-07T12:21:28,093 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T12:21:28,095 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-07T12:21:28,096 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T12:21:28,098 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-07T12:21:28,098 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T12:21:28,099 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T12:21:28,100 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-07T12:21:28,100 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T12:21:28,101 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T12:21:28,102 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-07T12:21:28,103 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T12:21:28,104 DEBUG 
[master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T12:21:28,106 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T12:21:28,108 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T12:21:28,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46855-0x1018cdf3d480001, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T12:21:28,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T12:21:28,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46855-0x1018cdf3d480001, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:28,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:28,110 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=27c6fcd7dac8,43989,1733574087391, sessionid=0x1018cdf3d480000, setting cluster-up flag (Was=false) 2024-12-07T12:21:28,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46855-0x1018cdf3d480001, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:28,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:28,120 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T12:21:28,121 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=27c6fcd7dac8,43989,1733574087391 2024-12-07T12:21:28,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46855-0x1018cdf3d480001, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:28,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:28,129 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T12:21:28,130 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=27c6fcd7dac8,43989,1733574087391 2024-12-07T12:21:28,132 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-07T12:21:28,133 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-07T12:21:28,134 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-07T12:21:28,134 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-07T12:21:28,134 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 27c6fcd7dac8,43989,1733574087391 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T12:21:28,135 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:21:28,135 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:21:28,135 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:21:28,136 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:21:28,136 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/27c6fcd7dac8:0, corePoolSize=10, maxPoolSize=10 2024-12-07T12:21:28,136 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:28,136 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=2, maxPoolSize=2 2024-12-07T12:21:28,136 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=1, 
maxPoolSize=1 2024-12-07T12:21:28,138 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T12:21:28,138 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-07T12:21:28,139 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:28,139 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T12:21:28,140 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733574118140 2024-12-07T12:21:28,141 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-07T12:21:28,141 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T12:21:28,141 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T12:21:28,141 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T12:21:28,141 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T12:21:28,141 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T12:21:28,141 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:28,141 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T12:21:28,142 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T12:21:28,142 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T12:21:28,142 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T12:21:28,142 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T12:21:28,144 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.large.0-1733574088142,5,FailOnTimeoutGroup] 2024-12-07T12:21:28,145 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.small.0-1733574088144,5,FailOnTimeoutGroup] 2024-12-07T12:21:28,145 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:28,145 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-07T12:21:28,145 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:28,145 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
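The master has now scheduled the LogsCleaner, HFileCleaner, ReplicationBarrierCleaner and SnapshotCleaner chores, and notes that reopening regions with very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is given a threshold greater than 0. A minimal sketch of supplying that override programmatically before starting a cluster; the threshold value of 3 is an illustrative assumption, not something taken from this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RecoveryThresholdConfig {
    public static Configuration build() {
        // Start from the default HBase configuration (hbase-default.xml + hbase-site.xml).
        Configuration conf = HBaseConfiguration.create();
        // A positive threshold enables the master chore that reopens regions whose
        // store files are pinned by too many open readers; 0 keeps it disabled,
        // which is what the log above reports. The value 3 is purely illustrative.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
        return conf;
    }
}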
2024-12-07T12:21:28,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38045 is added to blk_1073741831_1007 (size=1321) 2024-12-07T12:21:28,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42901 is added to blk_1073741831_1007 (size=1321) 2024-12-07T12:21:28,150 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-07T12:21:28,150 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3 2024-12-07T12:21:28,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38045 is added to blk_1073741832_1008 (size=32) 2024-12-07T12:21:28,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42901 is added to blk_1073741832_1008 (size=32) 2024-12-07T12:21:28,160 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:21:28,162 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T12:21:28,164 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T12:21:28,164 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:28,164 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:21:28,164 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T12:21:28,166 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T12:21:28,166 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:28,167 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:21:28,167 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T12:21:28,168 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T12:21:28,169 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:28,169 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:21:28,169 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T12:21:28,171 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T12:21:28,171 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:28,172 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:21:28,172 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T12:21:28,173 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/hbase/meta/1588230740 2024-12-07T12:21:28,173 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/hbase/meta/1588230740 2024-12-07T12:21:28,175 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T12:21:28,175 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T12:21:28,176 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
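The hbase:meta descriptor logged above declares four column families (info, ns, rep_barrier, table), each with ROW_INDEX_V1 data block encoding, a ROWCOL bloom filter and in-memory block caching, with an 8 KB block size for info, ns and table. A comparable family for an ordinary user table could be assembled with the public client API; this is a hedged sketch in which the table name, the single family and the Admin handle are assumptions made only for illustration:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class ExampleTableDescriptor {
    // Mirrors the settings the log shows for the 'info' family of hbase:meta.
    static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setBlocksize(8192)                              // 8 KB blocks
                .setInMemory(true)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .build())
            .build();
    }

    static void create(Admin admin) throws java.io.IOException {
        admin.createTable(build());
    }
}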
2024-12-07T12:21:28,178 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T12:21:28,185 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:21:28,185 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=878911, jitterRate=0.11759394407272339}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T12:21:28,187 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733574088161Initializing all the Stores at 1733574088162 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574088162Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574088162Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574088162Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574088162Cleaning up temporary data from old regions at 1733574088175 (+13 ms)Region opened successfully at 1733574088186 (+11 ms) 2024-12-07T12:21:28,187 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T12:21:28,187 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T12:21:28,187 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T12:21:28,187 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T12:21:28,187 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T12:21:28,187 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T12:21:28,188 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733574088187Disabling compacts and flushes for region at 1733574088187Disabling writes for close at 1733574088187Writing region 
close event to WAL at 1733574088187Closed at 1733574088187 2024-12-07T12:21:28,189 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T12:21:28,189 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-07T12:21:28,189 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T12:21:28,191 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T12:21:28,192 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T12:21:28,217 INFO [RS:0;27c6fcd7dac8:46855 {}] regionserver.HRegionServer(746): ClusterId : 653755b3-d875-4f21-b722-31e9305d80af 2024-12-07T12:21:28,217 DEBUG [RS:0;27c6fcd7dac8:46855 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T12:21:28,221 DEBUG [RS:0;27c6fcd7dac8:46855 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T12:21:28,221 DEBUG [RS:0;27c6fcd7dac8:46855 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T12:21:28,223 DEBUG [RS:0;27c6fcd7dac8:46855 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T12:21:28,223 DEBUG [RS:0;27c6fcd7dac8:46855 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c472d17, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=27c6fcd7dac8/172.17.0.2:0 2024-12-07T12:21:28,237 DEBUG [RS:0;27c6fcd7dac8:46855 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;27c6fcd7dac8:46855 2024-12-07T12:21:28,237 INFO [RS:0;27c6fcd7dac8:46855 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T12:21:28,237 INFO [RS:0;27c6fcd7dac8:46855 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T12:21:28,237 DEBUG [RS:0;27c6fcd7dac8:46855 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-07T12:21:28,238 INFO [RS:0;27c6fcd7dac8:46855 {}] regionserver.HRegionServer(2659): reportForDuty to master=27c6fcd7dac8,43989,1733574087391 with port=46855, startcode=1733574087478 2024-12-07T12:21:28,238 DEBUG [RS:0;27c6fcd7dac8:46855 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T12:21:28,240 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55037, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T12:21:28,241 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43989 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 27c6fcd7dac8,46855,1733574087478 2024-12-07T12:21:28,241 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43989 {}] master.ServerManager(517): Registering regionserver=27c6fcd7dac8,46855,1733574087478 2024-12-07T12:21:28,243 DEBUG [RS:0;27c6fcd7dac8:46855 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3 2024-12-07T12:21:28,243 DEBUG [RS:0;27c6fcd7dac8:46855 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45085 2024-12-07T12:21:28,243 DEBUG [RS:0;27c6fcd7dac8:46855 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T12:21:28,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T12:21:28,245 DEBUG [RS:0;27c6fcd7dac8:46855 {}] zookeeper.ZKUtil(111): regionserver:46855-0x1018cdf3d480001, quorum=127.0.0.1:62922, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/27c6fcd7dac8,46855,1733574087478 2024-12-07T12:21:28,245 WARN [RS:0;27c6fcd7dac8:46855 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T12:21:28,246 INFO [RS:0;27c6fcd7dac8:46855 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T12:21:28,246 DEBUG [RS:0;27c6fcd7dac8:46855 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478 2024-12-07T12:21:28,246 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [27c6fcd7dac8,46855,1733574087478] 2024-12-07T12:21:28,252 INFO [RS:0;27c6fcd7dac8:46855 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T12:21:28,255 INFO [RS:0;27c6fcd7dac8:46855 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T12:21:28,258 INFO [RS:0;27c6fcd7dac8:46855 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T12:21:28,258 INFO [RS:0;27c6fcd7dac8:46855 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-07T12:21:28,258 INFO [RS:0;27c6fcd7dac8:46855 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T12:21:28,260 INFO [RS:0;27c6fcd7dac8:46855 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T12:21:28,260 INFO [RS:0;27c6fcd7dac8:46855 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:28,260 DEBUG [RS:0;27c6fcd7dac8:46855 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:28,260 DEBUG [RS:0;27c6fcd7dac8:46855 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:28,260 DEBUG [RS:0;27c6fcd7dac8:46855 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:28,260 DEBUG [RS:0;27c6fcd7dac8:46855 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:28,260 DEBUG [RS:0;27c6fcd7dac8:46855 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:28,260 DEBUG [RS:0;27c6fcd7dac8:46855 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/27c6fcd7dac8:0, corePoolSize=2, maxPoolSize=2 2024-12-07T12:21:28,261 DEBUG [RS:0;27c6fcd7dac8:46855 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:28,261 DEBUG [RS:0;27c6fcd7dac8:46855 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:28,261 DEBUG [RS:0;27c6fcd7dac8:46855 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:28,261 DEBUG [RS:0;27c6fcd7dac8:46855 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:28,261 DEBUG [RS:0;27c6fcd7dac8:46855 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:28,261 DEBUG [RS:0;27c6fcd7dac8:46855 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:28,261 DEBUG [RS:0;27c6fcd7dac8:46855 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/27c6fcd7dac8:0, corePoolSize=3, maxPoolSize=3 2024-12-07T12:21:28,261 DEBUG [RS:0;27c6fcd7dac8:46855 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0, corePoolSize=3, maxPoolSize=3 2024-12-07T12:21:28,269 INFO [RS:0;27c6fcd7dac8:46855 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-07T12:21:28,269 INFO [RS:0;27c6fcd7dac8:46855 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:28,269 INFO [RS:0;27c6fcd7dac8:46855 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:28,269 INFO [RS:0;27c6fcd7dac8:46855 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:28,269 INFO [RS:0;27c6fcd7dac8:46855 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:28,269 INFO [RS:0;27c6fcd7dac8:46855 {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,46855,1733574087478-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T12:21:28,276 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T12:21:28,277 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-07T12:21:28,278 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-07T12:21:28,293 INFO [RS:0;27c6fcd7dac8:46855 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T12:21:28,293 INFO [RS:0;27c6fcd7dac8:46855 {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,46855,1733574087478-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:28,294 INFO [RS:0;27c6fcd7dac8:46855 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:28,294 INFO [RS:0;27c6fcd7dac8:46855 {}] regionserver.Replication(171): 27c6fcd7dac8,46855,1733574087478 started 2024-12-07T12:21:28,318 INFO [RS:0;27c6fcd7dac8:46855 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T12:21:28,318 INFO [RS:0;27c6fcd7dac8:46855 {}] regionserver.HRegionServer(1482): Serving as 27c6fcd7dac8,46855,1733574087478, RpcServer on 27c6fcd7dac8/172.17.0.2:46855, sessionid=0x1018cdf3d480001 2024-12-07T12:21:28,318 DEBUG [RS:0;27c6fcd7dac8:46855 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T12:21:28,319 DEBUG [RS:0;27c6fcd7dac8:46855 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 27c6fcd7dac8,46855,1733574087478 2024-12-07T12:21:28,319 DEBUG [RS:0;27c6fcd7dac8:46855 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '27c6fcd7dac8,46855,1733574087478' 2024-12-07T12:21:28,319 DEBUG [RS:0;27c6fcd7dac8:46855 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T12:21:28,319 DEBUG [RS:0;27c6fcd7dac8:46855 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T12:21:28,320 DEBUG [RS:0;27c6fcd7dac8:46855 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T12:21:28,320 DEBUG [RS:0;27c6fcd7dac8:46855 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T12:21:28,320 DEBUG [RS:0;27c6fcd7dac8:46855 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 27c6fcd7dac8,46855,1733574087478 2024-12-07T12:21:28,320 DEBUG [RS:0;27c6fcd7dac8:46855 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '27c6fcd7dac8,46855,1733574087478' 2024-12-07T12:21:28,320 DEBUG [RS:0;27c6fcd7dac8:46855 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T12:21:28,321 DEBUG [RS:0;27c6fcd7dac8:46855 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T12:21:28,321 DEBUG [RS:0;27c6fcd7dac8:46855 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T12:21:28,321 INFO [RS:0;27c6fcd7dac8:46855 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T12:21:28,321 INFO [RS:0;27c6fcd7dac8:46855 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T12:21:28,342 WARN [27c6fcd7dac8:43989 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-07T12:21:28,350 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:21:28,354 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:21:28,424 INFO [RS:0;27c6fcd7dac8:46855 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=27c6fcd7dac8%2C46855%2C1733574087478, suffix=, logDir=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478, archiveDir=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/oldWALs, maxLogs=32 2024-12-07T12:21:28,426 INFO [RS:0;27c6fcd7dac8:46855 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C46855%2C1733574087478.1733574088425 2024-12-07T12:21:28,436 INFO [RS:0;27c6fcd7dac8:46855 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574088425 2024-12-07T12:21:28,442 DEBUG [RS:0;27c6fcd7dac8:46855 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46011:46011),(127.0.0.1/127.0.0.1:44953:44953)] 2024-12-07T12:21:28,593 DEBUG [27c6fcd7dac8:43989 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-07T12:21:28,593 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=27c6fcd7dac8,46855,1733574087478 2024-12-07T12:21:28,595 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 27c6fcd7dac8,46855,1733574087478, state=OPENING 2024-12-07T12:21:28,597 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T12:21:28,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46855-0x1018cdf3d480001, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:28,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:21:28,599 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:21:28,599 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T12:21:28,599 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=27c6fcd7dac8,46855,1733574087478}] 2024-12-07T12:21:28,600 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:21:28,753 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T12:21:28,756 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55279, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T12:21:28,761 INFO 
[RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-07T12:21:28,761 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T12:21:28,763 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=27c6fcd7dac8%2C46855%2C1733574087478.meta, suffix=.meta, logDir=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478, archiveDir=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/oldWALs, maxLogs=32 2024-12-07T12:21:28,764 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta 2024-12-07T12:21:28,771 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta 2024-12-07T12:21:28,771 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46011:46011),(127.0.0.1/127.0.0.1:44953:44953)] 2024-12-07T12:21:28,775 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:21:28,776 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T12:21:28,776 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T12:21:28,776 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
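Just before opening the meta region, the region server loads org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from the table descriptor ("from HTD of hbase:meta"). User tables attach endpoint coprocessors through the same descriptor mechanism; a minimal hedged sketch, with the table name assumed purely for illustration:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorAttachment {
    static TableDescriptor withEndpoint() throws java.io.IOException {
        // setCoprocessor records the class name in the descriptor; the region server
        // instantiates it when each region of the table opens, as the log above shows
        // for hbase:meta. A real table would also need at least one column family
        // before it could be created.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
    }
}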
2024-12-07T12:21:28,776 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T12:21:28,776 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:21:28,776 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-07T12:21:28,776 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-07T12:21:28,778 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T12:21:28,779 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T12:21:28,780 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:28,780 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:21:28,780 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T12:21:28,781 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T12:21:28,781 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:28,782 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:21:28,782 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T12:21:28,783 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T12:21:28,783 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:28,783 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:21:28,783 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T12:21:28,784 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T12:21:28,784 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:28,785 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-07T12:21:28,785 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T12:21:28,786 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/hbase/meta/1588230740 2024-12-07T12:21:28,787 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/hbase/meta/1588230740 2024-12-07T12:21:28,789 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T12:21:28,789 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T12:21:28,790 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T12:21:28,791 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T12:21:28,792 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=743933, jitterRate=-0.05404064059257507}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T12:21:28,792 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-07T12:21:28,794 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733574088776Writing region info on filesystem at 1733574088776Initializing all the Stores at 1733574088778 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574088778Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574088778Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574088778Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574088778Cleaning up temporary data from old regions at 1733574088789 (+11 ms)Running coprocessor post-open hooks at 1733574088792 (+3 ms)Region opened successfully at 1733574088794 (+2 ms) 2024-12-07T12:21:28,795 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733574088753 2024-12-07T12:21:28,799 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T12:21:28,799 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-07T12:21:28,801 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=27c6fcd7dac8,46855,1733574087478 2024-12-07T12:21:28,802 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 27c6fcd7dac8,46855,1733574087478, state=OPEN 2024-12-07T12:21:28,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T12:21:28,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46855-0x1018cdf3d480001, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T12:21:28,808 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:21:28,808 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=27c6fcd7dac8,46855,1733574087478 2024-12-07T12:21:28,809 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:21:28,813 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-07T12:21:28,813 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=27c6fcd7dac8,46855,1733574087478 in 210 msec 2024-12-07T12:21:28,817 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-07T12:21:28,817 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 624 msec 2024-12-07T12:21:28,818 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T12:21:28,818 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-07T12:21:28,820 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T12:21:28,820 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=27c6fcd7dac8,46855,1733574087478, seqNum=-1] 2024-12-07T12:21:28,820 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T12:21:28,822 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46139, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T12:21:28,830 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 695 msec 2024-12-07T12:21:28,830 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733574088830, completionTime=-1 2024-12-07T12:21:28,830 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-07T12:21:28,830 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-07T12:21:28,833 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-07T12:21:28,833 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733574148833 2024-12-07T12:21:28,833 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733574208833 2024-12-07T12:21:28,833 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-07T12:21:28,833 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,43989,1733574087391-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:28,833 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,43989,1733574087391-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:28,833 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,43989,1733574087391-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:28,834 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-27c6fcd7dac8:43989, period=300000, unit=MILLISECONDS is enabled. 
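With InitMetaProcedure finished, the test client resolves the cluster id and the hbase:meta location through the connection registry ("Start fetching meta region location from registry" / "The fetched meta region location is ..."). The same lookup is reachable from application code via the public connection API; a hedged sketch, assuming a Configuration that points at the running cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLocationLookup {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
            // Resolves which region server currently hosts hbase:meta -- the same
            // information the log reports as the fetched meta region location.
            HRegionLocation location = locator.getRegionLocation(Bytes.toBytes(""));
            System.out.println("hbase:meta is served by " + location.getServerName());
        }
    }
}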
2024-12-07T12:21:28,834 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:28,834 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:28,836 DEBUG [master/27c6fcd7dac8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-07T12:21:28,839 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.295sec 2024-12-07T12:21:28,839 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T12:21:28,839 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T12:21:28,839 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T12:21:28,840 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-07T12:21:28,840 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T12:21:28,840 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,43989,1733574087391-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T12:21:28,840 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,43989,1733574087391-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T12:21:28,843 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-07T12:21:28,843 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T12:21:28,843 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,43989,1733574087391-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:28,876 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-07T12:21:28,877 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:21:28,899 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:21:28,901 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:21:28,903 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:21:28,917 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e800148, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T12:21:28,917 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 27c6fcd7dac8,43989,-1 for getting cluster id 2024-12-07T12:21:28,918 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-07T12:21:28,922 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '653755b3-d875-4f21-b722-31e9305d80af' 2024-12-07T12:21:28,923 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-07T12:21:28,923 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "653755b3-d875-4f21-b722-31e9305d80af" 2024-12-07T12:21:28,923 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f9ef2e9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T12:21:28,923 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [27c6fcd7dac8,43989,-1] 2024-12-07T12:21:28,924 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-07T12:21:28,924 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:21:28,926 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44384, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-07T12:21:28,926 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1744a862, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T12:21:28,927 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T12:21:28,928 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=27c6fcd7dac8,46855,1733574087478, seqNum=-1] 2024-12-07T12:21:28,928 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T12:21:28,930 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51196, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T12:21:28,932 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=27c6fcd7dac8,43989,1733574087391 2024-12-07T12:21:28,932 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:21:28,936 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-07T12:21:28,963 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/27c6fcd7dac8:0 server-side Connection retries=45 2024-12-07T12:21:28,963 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:21:28,963 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T12:21:28,963 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T12:21:28,963 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:21:28,963 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T12:21:28,963 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T12:21:28,963 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T12:21:28,964 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37667 2024-12-07T12:21:28,966 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37667 connecting to ZooKeeper ensemble=127.0.0.1:62922 2024-12-07T12:21:28,966 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:21:28,969 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:21:28,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:376670x0, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T12:21:28,974 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:37667-0x1018cdf3d480002, quorum=127.0.0.1:62922, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-12-07T12:21:28,974 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37667-0x1018cdf3d480002 connected 2024-12-07T12:21:28,975 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T12:21:28,975 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T12:21:28,976 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): 
regionserver:37667-0x1018cdf3d480002, quorum=127.0.0.1:62922, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T12:21:28,976 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-12-07T12:21:28,978 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37667-0x1018cdf3d480002, quorum=127.0.0.1:62922, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T12:21:28,984 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37667 2024-12-07T12:21:29,014 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37667 2024-12-07T12:21:29,016 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37667 2024-12-07T12:21:29,017 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37667 2024-12-07T12:21:29,017 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37667 2024-12-07T12:21:29,018 INFO [RS:1;27c6fcd7dac8:37667 {}] regionserver.HRegionServer(746): ClusterId : 653755b3-d875-4f21-b722-31e9305d80af 2024-12-07T12:21:29,019 DEBUG [RS:1;27c6fcd7dac8:37667 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T12:21:29,021 DEBUG [RS:1;27c6fcd7dac8:37667 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T12:21:29,021 DEBUG [RS:1;27c6fcd7dac8:37667 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T12:21:29,025 DEBUG [RS:1;27c6fcd7dac8:37667 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T12:21:29,026 DEBUG [RS:1;27c6fcd7dac8:37667 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28a6385a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=27c6fcd7dac8/172.17.0.2:0 2024-12-07T12:21:29,038 DEBUG [RS:1;27c6fcd7dac8:37667 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;27c6fcd7dac8:37667 2024-12-07T12:21:29,039 INFO [RS:1;27c6fcd7dac8:37667 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T12:21:29,039 INFO [RS:1;27c6fcd7dac8:37667 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T12:21:29,039 DEBUG [RS:1;27c6fcd7dac8:37667 {}] regionserver.HRegionServer(832): About to register with Master. 
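
The "Allocating BlockCache size=880 MB, blockSize=64 KB" entry a few lines above is normally a function of the on-heap block cache fraction; 0.4 of a roughly 2.2 GB heap would come out near 880 MB. A hedged sketch using the standard key; the heap arithmetic for this particular JVM is an assumption, not something the log states:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class BlockCacheConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.4f); // "hfile.block.cache.size"
    long heapBytes = Runtime.getRuntime().maxMemory();
    long cacheBytes = (long) (heapBytes * conf.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.4f));
    System.out.println("block cache would be about " + (cacheBytes >> 20) + " MB");
  }
}
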
2024-12-07T12:21:29,040 INFO [RS:1;27c6fcd7dac8:37667 {}] regionserver.HRegionServer(2659): reportForDuty to master=27c6fcd7dac8,43989,1733574087391 with port=37667, startcode=1733574088962 2024-12-07T12:21:29,040 DEBUG [RS:1;27c6fcd7dac8:37667 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T12:21:29,043 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60963, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T12:21:29,044 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43989 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 27c6fcd7dac8,37667,1733574088962 2024-12-07T12:21:29,044 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43989 {}] master.ServerManager(517): Registering regionserver=27c6fcd7dac8,37667,1733574088962 2024-12-07T12:21:29,046 DEBUG [RS:1;27c6fcd7dac8:37667 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3 2024-12-07T12:21:29,046 DEBUG [RS:1;27c6fcd7dac8:37667 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45085 2024-12-07T12:21:29,046 DEBUG [RS:1;27c6fcd7dac8:37667 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T12:21:29,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T12:21:29,048 DEBUG [RS:1;27c6fcd7dac8:37667 {}] zookeeper.ZKUtil(111): regionserver:37667-0x1018cdf3d480002, quorum=127.0.0.1:62922, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/27c6fcd7dac8,37667,1733574088962 2024-12-07T12:21:29,048 WARN [RS:1;27c6fcd7dac8:37667 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T12:21:29,048 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [27c6fcd7dac8,37667,1733574088962] 2024-12-07T12:21:29,048 INFO [RS:1;27c6fcd7dac8:37667 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T12:21:29,049 DEBUG [RS:1;27c6fcd7dac8:37667 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962 2024-12-07T12:21:29,053 INFO [RS:1;27c6fcd7dac8:37667 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T12:21:29,055 INFO [RS:1;27c6fcd7dac8:37667 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T12:21:29,056 INFO [RS:1;27c6fcd7dac8:37667 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T12:21:29,056 INFO [RS:1;27c6fcd7dac8:37667 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
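
The MemStoreFlusher limits above (880 M global, 836 M low-water mark) and the 100/50 MB/s compaction throughput bounds map onto a handful of standard configuration keys. A hedged sketch of those keys; the key names are standard, but treating these exact values as this run's configuration is an assumption:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreThroughputConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Global memstore limit as a fraction of the region server heap; 0.4 of a ~2.2 GB
    // heap would give roughly the 880 MB logged above.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // Low-water mark as a fraction of the global limit (0.95 * 880 MB is about 836 MB).
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    // PressureAwareCompactionThroughputController bounds, in bytes per second.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    System.out.println("memstore fraction = "
        + conf.getFloat("hbase.regionserver.global.memstore.size", -1f));
  }
}
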
2024-12-07T12:21:29,057 INFO [RS:1;27c6fcd7dac8:37667 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T12:21:29,058 INFO [RS:1;27c6fcd7dac8:37667 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T12:21:29,058 INFO [RS:1;27c6fcd7dac8:37667 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:29,058 DEBUG [RS:1;27c6fcd7dac8:37667 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:29,059 DEBUG [RS:1;27c6fcd7dac8:37667 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:29,059 DEBUG [RS:1;27c6fcd7dac8:37667 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:29,059 DEBUG [RS:1;27c6fcd7dac8:37667 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:29,059 DEBUG [RS:1;27c6fcd7dac8:37667 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:29,059 DEBUG [RS:1;27c6fcd7dac8:37667 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/27c6fcd7dac8:0, corePoolSize=2, maxPoolSize=2 2024-12-07T12:21:29,059 DEBUG [RS:1;27c6fcd7dac8:37667 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:29,059 DEBUG [RS:1;27c6fcd7dac8:37667 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:29,059 DEBUG [RS:1;27c6fcd7dac8:37667 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:29,059 DEBUG [RS:1;27c6fcd7dac8:37667 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:29,059 DEBUG [RS:1;27c6fcd7dac8:37667 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:29,059 DEBUG [RS:1;27c6fcd7dac8:37667 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:21:29,059 DEBUG [RS:1;27c6fcd7dac8:37667 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/27c6fcd7dac8:0, corePoolSize=3, maxPoolSize=3 2024-12-07T12:21:29,059 DEBUG [RS:1;27c6fcd7dac8:37667 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0, corePoolSize=3, maxPoolSize=3 2024-12-07T12:21:29,059 INFO [RS:1;27c6fcd7dac8:37667 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-07T12:21:29,060 INFO [RS:1;27c6fcd7dac8:37667 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:29,060 INFO [RS:1;27c6fcd7dac8:37667 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:29,060 INFO [RS:1;27c6fcd7dac8:37667 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:29,060 INFO [RS:1;27c6fcd7dac8:37667 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:29,060 INFO [RS:1;27c6fcd7dac8:37667 {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,37667,1733574088962-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T12:21:29,076 INFO [RS:1;27c6fcd7dac8:37667 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T12:21:29,077 INFO [RS:1;27c6fcd7dac8:37667 {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,37667,1733574088962-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:29,077 INFO [RS:1;27c6fcd7dac8:37667 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:29,077 INFO [RS:1;27c6fcd7dac8:37667 {}] regionserver.Replication(171): 27c6fcd7dac8,37667,1733574088962 started 2024-12-07T12:21:29,091 INFO [RS:1;27c6fcd7dac8:37667 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:21:29,091 INFO [RS:1;27c6fcd7dac8:37667 {}] regionserver.HRegionServer(1482): Serving as 27c6fcd7dac8,37667,1733574088962, RpcServer on 27c6fcd7dac8/172.17.0.2:37667, sessionid=0x1018cdf3d480002 2024-12-07T12:21:29,091 DEBUG [RS:1;27c6fcd7dac8:37667 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T12:21:29,091 DEBUG [RS:1;27c6fcd7dac8:37667 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 27c6fcd7dac8,37667,1733574088962 2024-12-07T12:21:29,091 DEBUG [RS:1;27c6fcd7dac8:37667 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '27c6fcd7dac8,37667,1733574088962' 2024-12-07T12:21:29,091 DEBUG [RS:1;27c6fcd7dac8:37667 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T12:21:29,091 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;27c6fcd7dac8:37667,5,FailOnTimeoutGroup] 2024-12-07T12:21:29,092 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-12-07T12:21:29,092 DEBUG [RS:1;27c6fcd7dac8:37667 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T12:21:29,092 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-07T12:21:29,093 DEBUG [RS:1;27c6fcd7dac8:37667 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T12:21:29,093 DEBUG [RS:1;27c6fcd7dac8:37667 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T12:21:29,093 DEBUG [RS:1;27c6fcd7dac8:37667 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
27c6fcd7dac8,37667,1733574088962 2024-12-07T12:21:29,093 DEBUG [RS:1;27c6fcd7dac8:37667 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '27c6fcd7dac8,37667,1733574088962' 2024-12-07T12:21:29,093 DEBUG [RS:1;27c6fcd7dac8:37667 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T12:21:29,093 DEBUG [RS:1;27c6fcd7dac8:37667 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T12:21:29,094 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 27c6fcd7dac8,43989,1733574087391 2024-12-07T12:21:29,094 DEBUG [RS:1;27c6fcd7dac8:37667 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T12:21:29,094 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@604ab482 2024-12-07T12:21:29,094 INFO [RS:1;27c6fcd7dac8:37667 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T12:21:29,094 INFO [RS:1;27c6fcd7dac8:37667 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T12:21:29,094 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T12:21:29,096 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44386, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T12:21:29,097 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43989 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-07T12:21:29,097 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43989 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
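
The two TableDescriptorChecker warnings above are expected here: the test apparently runs with a tiny region max file size (786432 bytes) and memstore flush size (8192 bytes) so that flushes and WAL activity happen quickly. A hedged sketch of how such values could be set on the configuration before the cluster starts, using the same keys the warnings quote; whether the test sets them exactly this way is an assumption:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class SmallRegionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong(HConstants.HREGION_MAX_FILESIZE, 786432L);       // "hbase.hregion.max.filesize"
    conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 8192L);  // "hbase.hregion.memstore.flush.size"
    System.out.println("max filesize = " + conf.getLong(HConstants.HREGION_MAX_FILESIZE, -1));
    System.out.println("flush size   = " + conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, -1));
  }
}
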
2024-12-07T12:21:29,097 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43989 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T12:21:29,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43989 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-07T12:21:29,103 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T12:21:29,103 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:29,103 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43989 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-12-07T12:21:29,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43989 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T12:21:29,105 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T12:21:29,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38045 is added to blk_1073741835_1011 (size=393) 2024-12-07T12:21:29,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42901 is added to blk_1073741835_1011 (size=393) 2024-12-07T12:21:29,126 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a3789a04774a73dc51c7f9b2ac34edb6, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3 2024-12-07T12:21:29,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38045 is added to blk_1073741836_1012 (size=76) 2024-12-07T12:21:29,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42901 is added to blk_1073741836_1012 (size=76) 2024-12-07T12:21:29,159 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:21:29,159 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing a3789a04774a73dc51c7f9b2ac34edb6, disabling compactions & flushes 2024-12-07T12:21:29,159 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6. 2024-12-07T12:21:29,159 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6. 2024-12-07T12:21:29,159 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6. after waiting 0 ms 2024-12-07T12:21:29,160 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6. 2024-12-07T12:21:29,160 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6. 2024-12-07T12:21:29,160 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for a3789a04774a73dc51c7f9b2ac34edb6: Waiting for close lock at 1733574089159Disabling compacts and flushes for region at 1733574089159Disabling writes for close at 1733574089159Writing region close event to WAL at 1733574089160 (+1 ms)Closed at 1733574089160 2024-12-07T12:21:29,161 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T12:21:29,162 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733574089161"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733574089161"}]},"ts":"1733574089161"} 2024-12-07T12:21:29,164 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
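
The create-table request logged above (table TestLogRolling-testLogRollOnDatanodeDeath with a single 'info' family, VERSIONS => '1') is what a client-side Admin call produces; the log only shows the master-side CreateTableProcedure. A hedged sketch of an equivalent request using the standard Admin API; the helper the test actually uses is not visible in this log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      TableDescriptorBuilder table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)   // VERSIONS => '1' in the descriptor above
              .build());
      admin.createTable(table.build()); // drives a CreateTableProcedure on the master, as with pid=4 above
      System.out.println("table exists: "
          + admin.tableExists(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath")));
    }
  }
}
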
2024-12-07T12:21:29,166 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T12:21:29,166 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733574089166"}]},"ts":"1733574089166"} 2024-12-07T12:21:29,168 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-07T12:21:29,168 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a3789a04774a73dc51c7f9b2ac34edb6, ASSIGN}] 2024-12-07T12:21:29,170 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a3789a04774a73dc51c7f9b2ac34edb6, ASSIGN 2024-12-07T12:21:29,171 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a3789a04774a73dc51c7f9b2ac34edb6, ASSIGN; state=OFFLINE, location=27c6fcd7dac8,46855,1733574087478; forceNewPlan=false, retain=false 2024-12-07T12:21:29,196 INFO [RS:1;27c6fcd7dac8:37667 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=27c6fcd7dac8%2C37667%2C1733574088962, suffix=, logDir=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962, archiveDir=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/oldWALs, maxLogs=32 2024-12-07T12:21:29,198 INFO [RS:1;27c6fcd7dac8:37667 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 2024-12-07T12:21:29,205 INFO [RS:1;27c6fcd7dac8:37667 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 2024-12-07T12:21:29,206 DEBUG [RS:1;27c6fcd7dac8:37667 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44953:44953),(127.0.0.1/127.0.0.1:46011:46011)] 2024-12-07T12:21:29,322 INFO [27c6fcd7dac8:43989 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
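
The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" entry above reflects the usual WAL knobs, where the roll size is the block size times the log-roll multiplier. A hedged sketch of those keys; treating the defaults as the source of these exact numbers in this run is an assumption:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // WAL block size
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // roll at 50% of block size
    conf.setInt("hbase.regionserver.maxlogs", 32);                         // cap on un-archived WAL files
    long blocksize = conf.getLong("hbase.regionserver.hlog.blocksize", 0);
    float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    System.out.println("roll size = " + (long) (blocksize * multiplier) + " bytes"); // 134217728 = 128 MB
  }
}
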
2024-12-07T12:21:29,322 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a3789a04774a73dc51c7f9b2ac34edb6, regionState=OPENING, regionLocation=27c6fcd7dac8,46855,1733574087478 2024-12-07T12:21:29,325 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a3789a04774a73dc51c7f9b2ac34edb6, ASSIGN because future has completed 2024-12-07T12:21:29,326 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a3789a04774a73dc51c7f9b2ac34edb6, server=27c6fcd7dac8,46855,1733574087478}] 2024-12-07T12:21:29,484 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6. 2024-12-07T12:21:29,484 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => a3789a04774a73dc51c7f9b2ac34edb6, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:21:29,485 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath a3789a04774a73dc51c7f9b2ac34edb6 2024-12-07T12:21:29,485 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:21:29,485 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for a3789a04774a73dc51c7f9b2ac34edb6 2024-12-07T12:21:29,485 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for a3789a04774a73dc51c7f9b2ac34edb6 2024-12-07T12:21:29,487 INFO [StoreOpener-a3789a04774a73dc51c7f9b2ac34edb6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region a3789a04774a73dc51c7f9b2ac34edb6 2024-12-07T12:21:29,488 INFO [StoreOpener-a3789a04774a73dc51c7f9b2ac34edb6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a3789a04774a73dc51c7f9b2ac34edb6 columnFamilyName info 2024-12-07T12:21:29,489 DEBUG [StoreOpener-a3789a04774a73dc51c7f9b2ac34edb6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:21:29,489 INFO [StoreOpener-a3789a04774a73dc51c7f9b2ac34edb6-1 {}] regionserver.HStore(327): Store=a3789a04774a73dc51c7f9b2ac34edb6/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:21:29,489 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for a3789a04774a73dc51c7f9b2ac34edb6 2024-12-07T12:21:29,490 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6 2024-12-07T12:21:29,491 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6 2024-12-07T12:21:29,491 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for a3789a04774a73dc51c7f9b2ac34edb6 2024-12-07T12:21:29,491 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for a3789a04774a73dc51c7f9b2ac34edb6 2024-12-07T12:21:29,493 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for a3789a04774a73dc51c7f9b2ac34edb6 2024-12-07T12:21:29,495 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:21:29,496 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened a3789a04774a73dc51c7f9b2ac34edb6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=729742, jitterRate=-0.07208557426929474}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T12:21:29,496 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a3789a04774a73dc51c7f9b2ac34edb6 2024-12-07T12:21:29,496 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for a3789a04774a73dc51c7f9b2ac34edb6: Running coprocessor pre-open hook at 1733574089485Writing region info on filesystem at 1733574089485Initializing all the Stores at 1733574089486 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574089486Cleaning up temporary data from old regions at 1733574089491 (+5 ms)Running coprocessor post-open hooks at 1733574089496 (+5 ms)Region opened successfully at 1733574089496 2024-12-07T12:21:29,498 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6., pid=6, masterSystemTime=1733574089479 2024-12-07T12:21:29,500 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6. 2024-12-07T12:21:29,500 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6. 2024-12-07T12:21:29,502 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a3789a04774a73dc51c7f9b2ac34edb6, regionState=OPEN, openSeqNum=2, regionLocation=27c6fcd7dac8,46855,1733574087478 2024-12-07T12:21:29,504 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a3789a04774a73dc51c7f9b2ac34edb6, server=27c6fcd7dac8,46855,1733574087478 because future has completed 2024-12-07T12:21:29,508 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-07T12:21:29,508 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure a3789a04774a73dc51c7f9b2ac34edb6, server=27c6fcd7dac8,46855,1733574087478 in 179 msec 2024-12-07T12:21:29,511 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-07T12:21:29,511 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a3789a04774a73dc51c7f9b2ac34edb6, ASSIGN in 340 msec 2024-12-07T12:21:29,513 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T12:21:29,513 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733574089513"}]},"ts":"1733574089513"} 2024-12-07T12:21:29,515 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-07T12:21:29,516 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T12:21:29,519 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 418 msec 2024-12-07T12:21:33,781 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-07T12:21:33,782 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:21:33,797 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:21:33,798 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:21:33,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:21:34,252 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-07T12:21:34,252 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-12-07T12:21:38,276 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-07T12:21:38,276 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-07T12:21:38,277 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-07T12:21:38,277 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-12-07T12:21:38,278 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T12:21:38,278 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-07T12:21:39,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43989 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T12:21:39,206 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-12-07T12:21:39,206 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at 
row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-12-07T12:21:39,209 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-12-07T12:21:39,210 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6. 2024-12-07T12:21:39,223 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:21:39,226 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:21:39,227 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:21:39,227 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:21:39,227 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T12:21:39,227 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2af8b745{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:21:39,228 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6030d470{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:21:39,344 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@41559526{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/java.io.tmpdir/jetty-localhost-38257-hadoop-hdfs-3_4_1-tests_jar-_-any-11776158148172082747/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:21:39,345 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@157a3fca{HTTP/1.1, (http/1.1)}{localhost:38257} 2024-12-07T12:21:39,345 INFO [Time-limited test {}] server.Server(415): Started @116762ms 2024-12-07T12:21:39,346 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T12:21:39,380 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:21:39,383 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:21:39,384 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:21:39,384 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:21:39,384 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T12:21:39,385 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@156f3a55{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:21:39,385 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ca82099{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:21:39,456 WARN [Thread-830 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data5/current/BP-1950294605-172.17.0.2-1733574086387/current, will proceed with Du for space computation calculation, 2024-12-07T12:21:39,456 WARN [Thread-831 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data6/current/BP-1950294605-172.17.0.2-1733574086387/current, will proceed with Du for space computation calculation, 2024-12-07T12:21:39,479 WARN [Thread-810 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T12:21:39,482 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5763dbe18622b7b9 with lease ID 0xbd7254818a6a002d: Processing first storage report for DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e from datanode DatanodeRegistration(127.0.0.1:36957, datanodeUuid=cd71cea8-3187-4258-8e55-f50772278836, infoPort=33959, infoSecurePort=0, ipcPort=46807, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387) 2024-12-07T12:21:39,482 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5763dbe18622b7b9 with lease ID 0xbd7254818a6a002d: from storage DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e node DatanodeRegistration(127.0.0.1:36957, datanodeUuid=cd71cea8-3187-4258-8e55-f50772278836, infoPort=33959, infoSecurePort=0, ipcPort=46807, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:21:39,482 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5763dbe18622b7b9 with lease ID 0xbd7254818a6a002d: Processing first storage report for DS-a35b901d-cc5d-4531-a305-02410a42c0cd from datanode DatanodeRegistration(127.0.0.1:36957, datanodeUuid=cd71cea8-3187-4258-8e55-f50772278836, infoPort=33959, infoSecurePort=0, ipcPort=46807, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387) 2024-12-07T12:21:39,483 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5763dbe18622b7b9 with lease ID 0xbd7254818a6a002d: from storage DS-a35b901d-cc5d-4531-a305-02410a42c0cd node DatanodeRegistration(127.0.0.1:36957, datanodeUuid=cd71cea8-3187-4258-8e55-f50772278836, infoPort=33959, infoSecurePort=0, ipcPort=46807, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:21:39,500 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b4117c9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/java.io.tmpdir/jetty-localhost-42709-hadoop-hdfs-3_4_1-tests_jar-_-any-15617259868784385291/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:21:39,501 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4ccc1bc4{HTTP/1.1, (http/1.1)}{localhost:42709} 2024-12-07T12:21:39,501 INFO [Time-limited test {}] server.Server(415): Started @116919ms 2024-12-07T12:21:39,502 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T12:21:39,538 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:21:39,541 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:21:39,543 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:21:39,543 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:21:39,543 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T12:21:39,544 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@687696f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:21:39,545 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fe58b15{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:21:39,611 WARN [Thread-865 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data7/current/BP-1950294605-172.17.0.2-1733574086387/current, will proceed with Du for space computation calculation, 2024-12-07T12:21:39,612 WARN [Thread-866 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data8/current/BP-1950294605-172.17.0.2-1733574086387/current, will proceed with Du for space computation calculation, 2024-12-07T12:21:39,641 WARN [Thread-845 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T12:21:39,644 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5a0d6e6d5c8a1472 with lease ID 0xbd7254818a6a002e: Processing first storage report for DS-98ba4bde-249b-4bad-8a38-d87428fa47ee from datanode DatanodeRegistration(127.0.0.1:39315, datanodeUuid=a88a03a9-2854-41f9-b710-2efe6d2391d7, infoPort=41629, infoSecurePort=0, ipcPort=37835, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387) 2024-12-07T12:21:39,645 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5a0d6e6d5c8a1472 with lease ID 0xbd7254818a6a002e: from storage DS-98ba4bde-249b-4bad-8a38-d87428fa47ee node DatanodeRegistration(127.0.0.1:39315, datanodeUuid=a88a03a9-2854-41f9-b710-2efe6d2391d7, infoPort=41629, infoSecurePort=0, ipcPort=37835, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T12:21:39,645 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5a0d6e6d5c8a1472 with lease ID 0xbd7254818a6a002e: Processing first storage report for DS-5f62faa7-b0b3-41f8-90b0-0a53cc8fadab from datanode DatanodeRegistration(127.0.0.1:39315, datanodeUuid=a88a03a9-2854-41f9-b710-2efe6d2391d7, infoPort=41629, infoSecurePort=0, ipcPort=37835, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387) 2024-12-07T12:21:39,645 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5a0d6e6d5c8a1472 with lease ID 0xbd7254818a6a002e: from storage DS-5f62faa7-b0b3-41f8-90b0-0a53cc8fadab node DatanodeRegistration(127.0.0.1:39315, datanodeUuid=a88a03a9-2854-41f9-b710-2efe6d2391d7, infoPort=41629, infoSecurePort=0, ipcPort=37835, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:21:39,671 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@69bcca{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/java.io.tmpdir/jetty-localhost-39175-hadoop-hdfs-3_4_1-tests_jar-_-any-1041028291982947550/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:21:39,671 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3247fd57{HTTP/1.1, (http/1.1)}{localhost:39175} 2024-12-07T12:21:39,672 INFO [Time-limited test {}] server.Server(415): Started @117089ms 2024-12-07T12:21:39,673 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-07T12:21:39,786 WARN [Thread-892 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data10/current/BP-1950294605-172.17.0.2-1733574086387/current, will proceed with Du for space computation calculation, 2024-12-07T12:21:39,786 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data9/current/BP-1950294605-172.17.0.2-1733574086387/current, will proceed with Du for space computation calculation, 2024-12-07T12:21:39,803 WARN [Thread-880 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T12:21:39,805 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2a2b7aac907d2d82 with lease ID 0xbd7254818a6a002f: Processing first storage report for DS-0ee68ff9-3a86-4043-89c8-9dc291195038 from datanode DatanodeRegistration(127.0.0.1:34911, datanodeUuid=e0484b5a-e768-4e4c-9c27-9b4b6ac93cdf, infoPort=40111, infoSecurePort=0, ipcPort=44683, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387) 2024-12-07T12:21:39,805 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2a2b7aac907d2d82 with lease ID 0xbd7254818a6a002f: from storage DS-0ee68ff9-3a86-4043-89c8-9dc291195038 node DatanodeRegistration(127.0.0.1:34911, datanodeUuid=e0484b5a-e768-4e4c-9c27-9b4b6ac93cdf, infoPort=40111, infoSecurePort=0, ipcPort=44683, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:21:39,805 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2a2b7aac907d2d82 with lease ID 0xbd7254818a6a002f: Processing first storage report for DS-a664f036-b204-486f-85d8-c9f38c31de7c from datanode DatanodeRegistration(127.0.0.1:34911, datanodeUuid=e0484b5a-e768-4e4c-9c27-9b4b6ac93cdf, infoPort=40111, infoSecurePort=0, ipcPort=44683, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387) 2024-12-07T12:21:39,805 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2a2b7aac907d2d82 with lease ID 0xbd7254818a6a002f: from storage DS-a664f036-b204-486f-85d8-c9f38c31de7c node DatanodeRegistration(127.0.0.1:34911, datanodeUuid=e0484b5a-e768-4e4c-9c27-9b4b6ac93cdf, infoPort=40111, infoSecurePort=0, ipcPort=44683, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:21:39,898 WARN [ResponseProcessor for block BP-1950294605-172.17.0.2-1733574086387:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1950294605-172.17.0.2-1733574086387:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:39,898 WARN [ResponseProcessor for block BP-1950294605-172.17.0.2-1733574086387:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1950294605-172.17.0.2-1733574086387:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:39,898 WARN [ResponseProcessor for block BP-1950294605-172.17.0.2-1733574086387:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1950294605-172.17.0.2-1733574086387:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:39,898 WARN [ResponseProcessor for block BP-1950294605-172.17.0.2-1733574086387:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1950294605-172.17.0.2-1733574086387:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-1950294605-172.17.0.2-1733574086387:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:39,898 WARN [DataStreamer for file /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574088425 block BP-1950294605-172.17.0.2-1733574086387:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK], DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]) is bad. 2024-12-07T12:21:39,898 WARN [DataStreamer for file /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 block BP-1950294605-172.17.0.2-1733574086387:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK], DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]) is bad. 
2024-12-07T12:21:39,898 WARN [DataStreamer for file /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/WALs/27c6fcd7dac8,43989,1733574087391/27c6fcd7dac8%2C43989%2C1733574087391.1733574088058 block BP-1950294605-172.17.0.2-1733574086387:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK], DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]) is bad. 2024-12-07T12:21:39,899 WARN [DataStreamer for file /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta block BP-1950294605-172.17.0.2-1733574086387:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK], DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]) is bad. 2024-12-07T12:21:39,899 WARN [PacketResponder: BP-1950294605-172.17.0.2-1733574086387:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:42901] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T12:21:39,900 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:48586 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38045:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48586 dst: /127.0.0.1:38045 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:39,900 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-893402062_22 at /127.0.0.1:48570 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38045:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48570 dst: /127.0.0.1:38045 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T12:21:39,900 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:48600 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:38045:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48600 dst: /127.0.0.1:38045 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:39,900 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:40710 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:42901:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40710 dst: /127.0.0.1:42901 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:39,900 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1100648405_22 at /127.0.0.1:40728 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:42901:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40728 dst: /127.0.0.1:42901 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T12:21:39,900 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1100648405_22 at /127.0.0.1:48622 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:38045:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48622 dst: /127.0.0.1:38045 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:39,900 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:40714 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:42901:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40714 dst: /127.0.0.1:42901 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:39,900 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-893402062_22 at /127.0.0.1:40674 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:42901:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40674 dst: /127.0.0.1:42901 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T12:21:39,901 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@53c8d161{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:21:39,901 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4bb19ef9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:21:39,902 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:21:39,902 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1e0a663b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:21:39,902 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@63af9ad6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/hadoop.log.dir/,STOPPED} 2024-12-07T12:21:39,903 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-07T12:21:39,903 WARN [BP-1950294605-172.17.0.2-1733574086387 heartbeating to localhost/127.0.0.1:45085 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T12:21:39,903 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T12:21:39,903 WARN [BP-1950294605-172.17.0.2-1733574086387 heartbeating to localhost/127.0.0.1:45085 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1950294605-172.17.0.2-1733574086387 (Datanode Uuid 92cd3207-d7db-45e7-bcec-7240b42e4324) service to localhost/127.0.0.1:45085 2024-12-07T12:21:39,904 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data3/current/BP-1950294605-172.17.0.2-1733574086387 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:21:39,904 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data4/current/BP-1950294605-172.17.0.2-1733574086387 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:21:39,905 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T12:21:39,905 WARN [DataStreamer for file /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta block BP-1950294605-172.17.0.2-1733574086387:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:39,905 WARN [DataStreamer for file /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574088425 block BP-1950294605-172.17.0.2-1733574086387:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:39,905 WARN [DataStreamer for file /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/WALs/27c6fcd7dac8,43989,1733574087391/27c6fcd7dac8%2C43989%2C1733574087391.1733574088058 block BP-1950294605-172.17.0.2-1733574086387:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:39,906 WARN [DataStreamer for file /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 block BP-1950294605-172.17.0.2-1733574086387:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:21:39,912 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@55cdd36{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:21:39,912 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1bb5d847{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:21:39,912 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:21:39,913 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57f0db4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:21:39,913 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22a56d6d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/hadoop.log.dir/,STOPPED} 2024-12-07T12:21:39,914 WARN [BP-1950294605-172.17.0.2-1733574086387 heartbeating to localhost/127.0.0.1:45085 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T12:21:39,914 WARN [BP-1950294605-172.17.0.2-1733574086387 heartbeating to localhost/127.0.0.1:45085 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1950294605-172.17.0.2-1733574086387 (Datanode Uuid ab2838e6-92f4-4080-aba6-2942391d8a34) service to localhost/127.0.0.1:45085 2024-12-07T12:21:39,914 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T12:21:39,914 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T12:21:39,914 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data1/current/BP-1950294605-172.17.0.2-1733574086387 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:21:39,915 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data2/current/BP-1950294605-172.17.0.2-1733574086387 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:21:39,915 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T12:21:39,918 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6., hostname=27c6fcd7dac8,46855,1733574087478, seqNum=2] 2024-12-07T12:21:39,920 ERROR [FSHLog-0-hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3-prefix:27c6fcd7dac8,46855,1733574087478 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:39,920 WARN [FSHLog-0-hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3-prefix:27c6fcd7dac8,46855,1733574087478 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:21:39,920 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:39,921 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 27c6fcd7dac8%2C46855%2C1733574087478:(num 1733574088425) roll requested 2024-12-07T12:21:39,921 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C46855%2C1733574087478.1733574099921 2024-12-07T12:21:39,924 WARN [Thread-902 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:39,924 WARN [Thread-902 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK], DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]) is bad. 2024-12-07T12:21:39,924 WARN [Thread-902 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741838_1018 2024-12-07T12:21:39,927 WARN [Thread-902 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK] 2024-12-07T12:21:39,931 WARN [Thread-902 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1019 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:39,931 WARN [Thread-902 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK], DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]) is bad. 2024-12-07T12:21:39,931 WARN [Thread-902 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741839_1019 2024-12-07T12:21:39,932 WARN [Thread-902 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK] 2024-12-07T12:21:39,937 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:39,937 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:39,937 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:39,937 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:39,937 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:39,937 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574088425 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574099921 2024-12-07T12:21:39,938 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:39,938 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:39,939 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-07T12:21:39,939 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-07T12:21:39,939 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574088425 2024-12-07T12:21:39,941 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40111:40111),(127.0.0.1/127.0.0.1:33959:33959)] 2024-12-07T12:21:39,941 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574088425 is not closed yet, will try archiving it next time 2024-12-07T12:21:39,942 WARN [IPC Server handler 0 on default port 45085 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574088425 has not been closed. Lease recovery is in progress. RecoveryId = 1021 for block blk_1073741833_1009 2024-12-07T12:21:39,945 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574088425 after 5ms 2024-12-07T12:21:41,060 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:41,617 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:41,941 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:41,942 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574099921 2024-12-07T12:21:41,943 WARN [ResponseProcessor for block BP-1950294605-172.17.0.2-1733574086387:blk_1073741840_1020 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1950294605-172.17.0.2-1733574086387:blk_1073741840_1020 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:41,943 WARN [DataStreamer for file /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574099921 block BP-1950294605-172.17.0.2-1733574086387:blk_1073741840_1020 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741840_1020 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK], DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]) is bad. 2024-12-07T12:21:41,944 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:42448 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741840_1020] {}] datanode.DataXceiver(331): 127.0.0.1:34911:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42448 dst: /127.0.0.1:34911 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:41,944 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:42948 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741840_1020] {}] datanode.DataXceiver(331): 127.0.0.1:36957:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42948 dst: /127.0.0.1:36957 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
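For reference, the lease-recovery attempts logged above (RecoverLeaseFSUtils, "Failed to recover lease, attempt=0 ... after 5ms") can be driven through the public HDFS client API. A minimal sketch, assuming a Hadoop 3.x DistributedFileSystem and a placeholder WAL path; this is an illustration of the retry pattern, not the actual RecoverLeaseFSUtils implementation.

// Minimal sketch of client-driven lease recovery on an HDFS file.
// Assumptions: Hadoop 3.x client on the classpath, hypothetical path and URI.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static boolean recoverWithRetries(DistributedFileSystem dfs, Path wal)
      throws Exception {
    // recoverLease() returns true once the lease is released and the file is closed;
    // otherwise the NameNode starts background recovery (the RecoveryId seen above)
    // and the caller retries after a pause.
    for (int attempt = 0; attempt < 10; attempt++) {
      if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
        return true;
      }
      Thread.sleep(4000L); // roughly matches the ~4s gap between attempts in this log
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem)
        FileSystem.get(java.net.URI.create("hdfs://localhost:45085"), conf);
    Path wal = new Path("/user/jenkins/some-wal-file"); // hypothetical path
    System.out.println("lease recovered: " + recoverWithRetries(dfs, wal));
  }
}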
2024-12-07T12:21:41,945 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@69bcca{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:21:41,945 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3247fd57{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:21:41,945 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:21:41,946 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fe58b15{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:21:41,946 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@687696f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/hadoop.log.dir/,STOPPED} 2024-12-07T12:21:41,947 WARN [BP-1950294605-172.17.0.2-1733574086387 heartbeating to localhost/127.0.0.1:45085 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T12:21:41,947 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-07T12:21:41,947 WARN [BP-1950294605-172.17.0.2-1733574086387 heartbeating to localhost/127.0.0.1:45085 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1950294605-172.17.0.2-1733574086387 (Datanode Uuid e0484b5a-e768-4e4c-9c27-9b4b6ac93cdf) service to localhost/127.0.0.1:45085 2024-12-07T12:21:41,947 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T12:21:41,947 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data9/current/BP-1950294605-172.17.0.2-1733574086387 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:21:41,947 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data10/current/BP-1950294605-172.17.0.2-1733574086387 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:21:41,948 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T12:21:43,060 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:43,618 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:43,941 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:43,942 WARN [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]] 2024-12-07T12:21:43,942 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 27c6fcd7dac8%2C46855%2C1733574087478:(num 1733574099921) roll requested 2024-12-07T12:21:43,942 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C46855%2C1733574087478.1733574103942 2024-12-07T12:21:43,946 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574088425 after 4007ms 2024-12-07T12:21:43,946 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42901 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:43,946 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:54200 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741841_1023] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data6]'}, localName='127.0.0.1:36957', datanodeUuid='cd71cea8-3187-4258-8e55-f50772278836', xmitsInProgress=0}:Exception transferring block BP-1950294605-172.17.0.2-1733574086387:blk_1073741841_1023 to mirror 127.0.0.1:42901 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:43,946 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK], DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]) is bad. 2024-12-07T12:21:43,946 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741841_1023 2024-12-07T12:21:43,947 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:54200 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741841_1023] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-07T12:21:43,947 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:54200 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:36957:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54200 dst: /127.0.0.1:36957 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:43,947 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK] 2024-12-07T12:21:43,948 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:43,948 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK], DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]) is bad. 
2024-12-07T12:21:43,948 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741842_1024 2024-12-07T12:21:43,949 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK] 2024-12-07T12:21:43,952 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:43,953 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:43,953 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:43,953 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:43,953 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T12:21:43,953 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:43,953 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574099921 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574103942 2024-12-07T12:21:43,955 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41629:41629),(127.0.0.1/127.0.0.1:33959:33959)] 2024-12-07T12:21:43,955 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574088425 is not closed yet, will try archiving it next time 2024-12-07T12:21:43,955 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574099921 is not closed yet, will try archiving it next time 2024-12-07T12:21:43,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36957 is added to blk_1073741840_1022 (size=2431) 2024-12-07T12:21:44,356 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574088425 is not closed yet, will try archiving it next time 2024-12-07T12:21:45,061 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
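The roll above ("Rolled WAL ... with entries=2 ... new WAL ...") is requested internally by the region server's log roller after it detects the shrinking pipeline. For reference, a WAL roll can also be requested from a client through the HBase Admin API; a minimal sketch, assuming an HBase client on the classpath and using the server name as it appears in the WAL directory above.

// Minimal sketch: asking a region server to roll its WAL via the Admin API.
// Assumptions: reachable cluster, HBase client configuration on the classpath.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // host,port,startcode as used in the WAL path in the log above
      ServerName rs = ServerName.valueOf("27c6fcd7dac8,46855,1733574087478");
      admin.rollWALWriter(rs); // closes the current WAL file and opens a new one
    }
  }
}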
2024-12-07T12:21:45,498 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@66672bda[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36957, datanodeUuid=cd71cea8-3187-4258-8e55-f50772278836, infoPort=33959, infoSecurePort=0, ipcPort=46807, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387):Failed to transfer BP-1950294605-172.17.0.2-1733574086387:blk_1073741840_1022 to 127.0.0.1:42901 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:45,618 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:45,955 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:45,957 WARN [ResponseProcessor for block BP-1950294605-172.17.0.2-1733574086387:blk_1073741843_1025 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1950294605-172.17.0.2-1733574086387:blk_1073741843_1025 java.io.IOException: Bad response ERROR for BP-1950294605-172.17.0.2-1733574086387:blk_1073741843_1025 from datanode DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
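The recurring "All datanodes ... are bad. Aborting..." entries come from the DataStreamer giving up on pipeline recovery once no replacement datanode can be found. On small clusters this behaviour is commonly tuned with the standard replace-datanode-on-failure client settings; the keys below are real HDFS client properties, but the values are assumptions for a small test cluster, not this run's configuration.

// Illustrative HDFS client settings relevant to the pipeline-recovery failures above.
import org.apache.hadoop.conf.Configuration;

public class PipelinePolicySketch {
  public static Configuration smallClusterClientConf() {
    Configuration conf = new Configuration();
    conf.setInt("dfs.replication", 2);
    // Keep writing on a best-effort basis when a replacement datanode cannot be
    // found, instead of aborting the stream.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}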
2024-12-07T12:21:45,957 WARN [DataStreamer for file /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574103942 block BP-1950294605-172.17.0.2-1733574086387:blk_1073741843_1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK], DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]) is bad. 2024-12-07T12:21:45,957 WARN [PacketResponder: BP-1950294605-172.17.0.2-1733574086387:blk_1073741843_1025, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36957] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:45,958 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:58984 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:39315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58984 dst: /127.0.0.1:39315 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:45,958 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:54204 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:36957:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54204 dst: /127.0.0.1:36957 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:45,959 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@41559526{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:21:45,960 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@157a3fca{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:21:45,960 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:21:45,960 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6030d470{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:21:45,960 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2af8b745{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/hadoop.log.dir/,STOPPED} 2024-12-07T12:21:45,961 WARN [BP-1950294605-172.17.0.2-1733574086387 heartbeating to localhost/127.0.0.1:45085 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T12:21:45,961 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T12:21:45,961 WARN [BP-1950294605-172.17.0.2-1733574086387 heartbeating to localhost/127.0.0.1:45085 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1950294605-172.17.0.2-1733574086387 (Datanode Uuid cd71cea8-3187-4258-8e55-f50772278836) service to localhost/127.0.0.1:45085 2024-12-07T12:21:45,962 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T12:21:45,962 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data5/current/BP-1950294605-172.17.0.2-1733574086387 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:21:45,962 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data6/current/BP-1950294605-172.17.0.2-1733574086387 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:21:45,962 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T12:21:45,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46855 {}] regionserver.HRegion(8855): Flush requested on a3789a04774a73dc51c7f9b2ac34edb6 2024-12-07T12:21:45,971 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a3789a04774a73dc51c7f9b2ac34edb6 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-07T12:21:45,987 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/621eb98441604c859fe648f3621309a5 is 1080, key is row0002/info:/1733574101949/Put/seqid=0 2024-12-07T12:21:45,990 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42901 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:21:45,990 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59008 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741844_1027] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data8]'}, localName='127.0.0.1:39315', datanodeUuid='a88a03a9-2854-41f9-b710-2efe6d2391d7', xmitsInProgress=0}:Exception transferring block BP-1950294605-172.17.0.2-1733574086387:blk_1073741844_1027 to mirror 127.0.0.1:42901 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:45,990 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK], DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]) is bad. 2024-12-07T12:21:45,990 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741844_1027 2024-12-07T12:21:45,990 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59008 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741844_1027] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T12:21:45,991 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59008 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741844_1027] {}] datanode.DataXceiver(331): 127.0.0.1:39315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59008 dst: /127.0.0.1:39315 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:45,991 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK] 2024-12-07T12:21:45,992 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:45,992 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK], DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]) is bad. 2024-12-07T12:21:45,992 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741845_1028 2024-12-07T12:21:45,993 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK] 2024-12-07T12:21:45,995 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38045 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:21:45,995 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59020 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data8]'}, localName='127.0.0.1:39315', datanodeUuid='a88a03a9-2854-41f9-b710-2efe6d2391d7', xmitsInProgress=0}:Exception transferring block BP-1950294605-172.17.0.2-1733574086387:blk_1073741846_1029 to mirror 127.0.0.1:38045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:45,995 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK], DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]) is bad. 2024-12-07T12:21:45,995 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741846_1029 2024-12-07T12:21:45,995 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59020 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T12:21:45,995 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59020 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:39315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59020 dst: /127.0.0.1:39315 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:45,995 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK] 2024-12-07T12:21:45,997 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34911 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:45,997 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59036 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741847_1030] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data8]'}, localName='127.0.0.1:39315', datanodeUuid='a88a03a9-2854-41f9-b710-2efe6d2391d7', xmitsInProgress=0}:Exception transferring block BP-1950294605-172.17.0.2-1733574086387:blk_1073741847_1030 to mirror 127.0.0.1:34911 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T12:21:45,997 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK], DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]) is bad. 2024-12-07T12:21:45,997 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741847_1030 2024-12-07T12:21:45,997 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59036 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741847_1030] {}] datanode.BlockReceiver(316): Block 1073741847 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T12:21:45,998 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59036 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741847_1030] {}] datanode.DataXceiver(331): 127.0.0.1:39315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59036 dst: /127.0.0.1:39315 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T12:21:45,998 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK] 2024-12-07T12:21:45,999 WARN [IPC Server handler 2 on default port 45085 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T12:21:45,999 WARN [IPC Server handler 2 on default port 45085 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T12:21:45,999 WARN [IPC Server handler 2 on default port 45085 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T12:21:46,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741848_1031 (size=10347) 2024-12-07T12:21:46,402 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/621eb98441604c859fe648f3621309a5 2024-12-07T12:21:46,410 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/621eb98441604c859fe648f3621309a5 as hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/621eb98441604c859fe648f3621309a5 2024-12-07T12:21:46,416 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/621eb98441604c859fe648f3621309a5, entries=5, sequenceid=11, filesize=10.1 K 2024-12-07T12:21:46,417 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for a3789a04774a73dc51c7f9b2ac34edb6 in 446ms, sequenceid=11, compaction requested=false 2024-12-07T12:21:46,417 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a3789a04774a73dc51c7f9b2ac34edb6: 2024-12-07T12:21:46,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46855 {}] 
regionserver.HRegion(8855): Flush requested on a3789a04774a73dc51c7f9b2ac34edb6 2024-12-07T12:21:46,591 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a3789a04774a73dc51c7f9b2ac34edb6 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-12-07T12:21:46,596 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/4b1faa2dfec64e9794c322bddde7a9c0 is 1080, key is row0007/info:/1733574105972/Put/seqid=0 2024-12-07T12:21:46,599 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38045 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:46,599 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59060 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data8]'}, localName='127.0.0.1:39315', datanodeUuid='a88a03a9-2854-41f9-b710-2efe6d2391d7', xmitsInProgress=0}:Exception transferring block BP-1950294605-172.17.0.2-1733574086387:blk_1073741849_1032 to mirror 127.0.0.1:38045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
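The "Failed to place enough replicas, still in need of 1 to reach 2" warnings above mean fewer live DataNodes with DISK storage remain than the requested replication after the datanodes were stopped. For context, a minimal sketch of how such a local test cluster is typically brought up with MiniDFSCluster from the hadoop-hdfs test jar; the node count here is an assumption, not taken from this run.

// Minimal sketch of a local HDFS test cluster; once datanodes are stopped,
// the NameNode can no longer place the requested number of replicas.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.setInt("dfs.replication", 2);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3) // assumed count; dropping below 2 live nodes triggers the warning above
        .build();
    try {
      cluster.waitActive();
      // ... run test traffic against cluster.getFileSystem() ...
    } finally {
      cluster.shutdown();
    }
  }
}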
2024-12-07T12:21:46,599 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK], DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]) is bad. 2024-12-07T12:21:46,599 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59060 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T12:21:46,599 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741849_1032 2024-12-07T12:21:46,599 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59060 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:39315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59060 dst: /127.0.0.1:39315 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:46,600 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK] 2024-12-07T12:21:46,601 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:21:46,602 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK], DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]) is bad. 2024-12-07T12:21:46,602 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741850_1033 2024-12-07T12:21:46,602 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK] 2024-12-07T12:21:46,604 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59066 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741851_1034] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data8]'}, localName='127.0.0.1:39315', datanodeUuid='a88a03a9-2854-41f9-b710-2efe6d2391d7', xmitsInProgress=0}:Exception transferring block BP-1950294605-172.17.0.2-1733574086387:blk_1073741851_1034 to mirror 127.0.0.1:34911 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:46,604 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34911 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:21:46,604 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59066 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741851_1034] {}] datanode.BlockReceiver(316): Block 1073741851 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T12:21:46,604 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK], DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]) is bad. 2024-12-07T12:21:46,604 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741851_1034 2024-12-07T12:21:46,604 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59066 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741851_1034] {}] datanode.DataXceiver(331): 127.0.0.1:39315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59066 dst: /127.0.0.1:39315 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:46,605 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK] 2024-12-07T12:21:46,606 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
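The NameNode-side placement warnings (blockmanagement.BlockPlacementPolicyDefault) above explicitly suggest enabling DEBUG on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology for more detail. Since this process already runs on Log4j 2, one way to do that at runtime (for example from a test hook) is via Configurator; the same loggers could equally be raised in the log4j2 configuration file. A small sketch:

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

public class EnablePlacementDebugSketch {
  public static void enableDebug() {
    // Logger names taken from the WARN message above; DEBUG is verbose, so this
    // would typically be enabled only while reproducing the placement failure.
    Configurator.setLevel(
        "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
    Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
  }
}
```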
2024-12-07T12:21:46,606 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK], DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]) is bad. 2024-12-07T12:21:46,606 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741852_1035 2024-12-07T12:21:46,607 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK] 2024-12-07T12:21:46,607 WARN [IPC Server handler 2 on default port 45085 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T12:21:46,607 WARN [IPC Server handler 2 on default port 45085 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T12:21:46,607 WARN [IPC Server handler 2 on default port 45085 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T12:21:46,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741853_1036 (size=12506) 2024-12-07T12:21:47,011 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/4b1faa2dfec64e9794c322bddde7a9c0 2024-12-07T12:21:47,019 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/4b1faa2dfec64e9794c322bddde7a9c0 as hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/4b1faa2dfec64e9794c322bddde7a9c0 2024-12-07T12:21:47,025 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/4b1faa2dfec64e9794c322bddde7a9c0, entries=7, sequenceid=24, filesize=12.2 K 2024-12-07T12:21:47,027 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for a3789a04774a73dc51c7f9b2ac34edb6 in 435ms, sequenceid=24, compaction requested=false 2024-12-07T12:21:47,027 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a3789a04774a73dc51c7f9b2ac34edb6: 2024-12-07T12:21:47,027 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-12-07T12:21:47,027 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:21:47,027 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/4b1faa2dfec64e9794c322bddde7a9c0 because midkey is the same as first or last row 2024-12-07T12:21:47,061 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:47,618 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:47,956 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:47,956 WARN [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK]] 2024-12-07T12:21:47,956 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 27c6fcd7dac8%2C46855%2C1733574087478:(num 1733574103942) roll requested 2024-12-07T12:21:47,956 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C46855%2C1733574087478.1733574107956 2024-12-07T12:21:47,959 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:47,959 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK], DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]) is bad. 2024-12-07T12:21:47,959 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741854_1037 2024-12-07T12:21:47,960 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK] 2024-12-07T12:21:47,961 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:47,961 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK], DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]) is bad. 2024-12-07T12:21:47,961 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741855_1038 2024-12-07T12:21:47,962 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK] 2024-12-07T12:21:47,963 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:47,963 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK], DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]) is bad. 2024-12-07T12:21:47,963 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741856_1039 2024-12-07T12:21:47,963 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK] 2024-12-07T12:21:47,964 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:47,964 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK], DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]) is bad. 2024-12-07T12:21:47,964 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741857_1040 2024-12-07T12:21:47,965 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK] 2024-12-07T12:21:47,965 WARN [IPC Server handler 1 on default port 45085 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T12:21:47,965 WARN [IPC Server handler 1 on default port 45085 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T12:21:47,965 WARN [IPC Server handler 1 on default port 45085 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T12:21:47,968 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:47,968 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:47,968 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:47,968 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:47,968 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:47,968 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574103942 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574107956 2024-12-07T12:21:47,970 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741843_1026 (size=25992) 2024-12-07T12:21:47,973 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41629:41629)] 2024-12-07T12:21:47,973 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574088425 is not closed yet, will try archiving it next time 2024-12-07T12:21:47,973 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574103942 is not closed yet, will try archiving it next time 2024-12-07T12:21:47,974 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574099921 to hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/oldWALs/27c6fcd7dac8%2C46855%2C1733574087478.1733574099921 2024-12-07T12:21:48,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46855 {}] regionserver.HRegion(8855): Flush requested on a3789a04774a73dc51c7f9b2ac34edb6 2024-12-07T12:21:48,011 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a3789a04774a73dc51c7f9b2ac34edb6 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-07T12:21:48,016 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/886c55735bcd4e8eba77be9b7d6e65be is 1079, key is tmprow/info:/1733574108010/Put/seqid=0 2024-12-07T12:21:48,018 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:48,018 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK], DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]) is bad. 
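The roll above was triggered because the WAL writer saw only one live replica in its pipeline ("Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL"), producing the new 1733574107956 WAL. The replica count FSHLog tolerates and the number of consecutive low-replication rolls it allows are configurable, and a roll can also be forced from a client. A sketch under the assumption of a reachable cluster (connection details are placeholders; the server name is copied from the log):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WalRollSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Replica count the WAL tolerates before requesting a roll, and how many
    // consecutive low-replication rolls are attempted before giving up.
    conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
    conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Force a WAL roll on one region server.
      admin.rollWALWriter(ServerName.valueOf("27c6fcd7dac8,46855,1733574087478"));
    }
  }
}
```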
2024-12-07T12:21:48,018 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741859_1042 2024-12-07T12:21:48,019 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK] 2024-12-07T12:21:48,020 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:48,020 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK], DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]) is bad. 2024-12-07T12:21:48,020 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741860_1043 2024-12-07T12:21:48,020 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK] 2024-12-07T12:21:48,022 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36957 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
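The flush of the 'tmprow' edit a few entries above was requested internally by the region server once the memstore grew large enough; the same flush can also be driven from a client, which the hedged sketch below assumes (table name taken from the log, connection details are placeholders):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      // Ask the cluster to flush all memstores of the table exercised by this test.
      admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
    }
  }
}
```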
2024-12-07T12:21:48,022 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59096 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data8]'}, localName='127.0.0.1:39315', datanodeUuid='a88a03a9-2854-41f9-b710-2efe6d2391d7', xmitsInProgress=0}:Exception transferring block BP-1950294605-172.17.0.2-1733574086387:blk_1073741861_1044 to mirror 127.0.0.1:36957 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:48,023 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK], DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]) is bad. 2024-12-07T12:21:48,023 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59096 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T12:21:48,023 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741861_1044 2024-12-07T12:21:48,023 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59096 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:39315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59096 dst: /127.0.0.1:39315 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:48,023 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK] 2024-12-07T12:21:48,025 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42901 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:48,025 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59104 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741862_1045] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data8]'}, localName='127.0.0.1:39315', datanodeUuid='a88a03a9-2854-41f9-b710-2efe6d2391d7', xmitsInProgress=0}:Exception transferring block BP-1950294605-172.17.0.2-1733574086387:blk_1073741862_1045 to mirror 127.0.0.1:42901 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T12:21:48,025 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK], DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]) is bad. 2024-12-07T12:21:48,025 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741862_1045 2024-12-07T12:21:48,025 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59104 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741862_1045] {}] datanode.BlockReceiver(316): Block 1073741862 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T12:21:48,025 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59104 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741862_1045] {}] datanode.DataXceiver(331): 127.0.0.1:39315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59104 dst: /127.0.0.1:39315 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T12:21:48,026 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK] 2024-12-07T12:21:48,026 WARN [IPC Server handler 4 on default port 45085 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T12:21:48,026 WARN [IPC Server handler 4 on default port 45085 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T12:21:48,026 WARN [IPC Server handler 4 on default port 45085 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T12:21:48,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741863_1046 (size=6027) 2024-12-07T12:21:48,371 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574088425 is not closed yet, will try archiving it next time 2024-12-07T12:21:48,430 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/886c55735bcd4e8eba77be9b7d6e65be 2024-12-07T12:21:48,436 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/886c55735bcd4e8eba77be9b7d6e65be as hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/886c55735bcd4e8eba77be9b7d6e65be 2024-12-07T12:21:48,442 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/886c55735bcd4e8eba77be9b7d6e65be, entries=1, sequenceid=34, filesize=5.9 K 2024-12-07T12:21:48,444 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for a3789a04774a73dc51c7f9b2ac34edb6 in 433ms, 
sequenceid=34, compaction requested=true 2024-12-07T12:21:48,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a3789a04774a73dc51c7f9b2ac34edb6: 2024-12-07T12:21:48,444 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-12-07T12:21:48,444 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:21:48,444 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/4b1faa2dfec64e9794c322bddde7a9c0 because midkey is the same as first or last row 2024-12-07T12:21:48,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a3789a04774a73dc51c7f9b2ac34edb6:info, priority=-2147483648, current under compaction store size is 1 2024-12-07T12:21:48,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T12:21:48,445 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T12:21:48,446 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T12:21:48,446 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.HStore(1541): a3789a04774a73dc51c7f9b2ac34edb6/info is initiating minor compaction (all files) 2024-12-07T12:21:48,446 INFO [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a3789a04774a73dc51c7f9b2ac34edb6/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6. 
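The two split-policy DEBUG lines above show the decision being made: the store total (28.2 K) exceeds the size to check (16.0 K), but the split is rejected because the largest store file's midkey equals its first or last row. Below is a simplified, stand-alone sketch of the size test only; the constants and the overall shape are illustrative and not the exact HBase implementation:

```java
public class SplitSizeCheckSketch {
  /**
   * Rough shape of IncreasingToUpperBoundRegionSplitPolicy's size test: the
   * threshold grows with the cube of the number of regions of the same table
   * on this server, capped at the configured maximum file size.
   */
  static boolean shouldSplit(long sumStoreSizeBytes, int regionsWithCommonTable,
                             long initialSizeBytes, long maxFileSizeBytes) {
    long sizeToCheck = (regionsWithCommonTable == 0 || regionsWithCommonTable > 100)
        ? maxFileSizeBytes
        : Math.min(maxFileSizeBytes,
            initialSizeBytes * regionsWithCommonTable * regionsWithCommonTable
                * regionsWithCommonTable);
    return sumStoreSizeBytes > sizeToCheck;
  }

  public static void main(String[] args) {
    // Numbers taken from the log: ~28.2 K of store files vs. a 16.0 K threshold.
    System.out.println(shouldSplit(28_880L, 1, 16_384L, 1_073_741_824L)); // true
  }
}
```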
2024-12-07T12:21:48,446 INFO [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/621eb98441604c859fe648f3621309a5, hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/4b1faa2dfec64e9794c322bddde7a9c0, hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/886c55735bcd4e8eba77be9b7d6e65be] into tmpdir=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp, totalSize=28.2 K 2024-12-07T12:21:48,446 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] compactions.Compactor(225): Compacting 621eb98441604c859fe648f3621309a5, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733574101949 2024-12-07T12:21:48,447 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4b1faa2dfec64e9794c322bddde7a9c0, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1733574105972 2024-12-07T12:21:48,447 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] compactions.Compactor(225): Compacting 886c55735bcd4e8eba77be9b7d6e65be, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733574108010 2024-12-07T12:21:48,460 INFO [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a3789a04774a73dc51c7f9b2ac34edb6#info#compaction#21 average throughput is 12.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T12:21:48,460 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/c481908d9af2431e921f3faa8df6bb4c is 1080, key is row0002/info:/1733574101949/Put/seqid=0 2024-12-07T12:21:48,462 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
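The minor compaction selected above rewrites the three flushed files (seqNum 11, 24 and 34) into a single new store file. For completeness, a compaction of the same store can also be requested explicitly from a client; a hedged sketch, with connection details as placeholders:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactionRequestSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
    try (Connection connection =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      // Queue a compaction of the 'info' family, mirroring the store compacted
      // in the log; majorCompact(...) would instead rewrite all files unconditionally.
      admin.compact(table, Bytes.toBytes("info"));
    }
  }
}
```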
2024-12-07T12:21:48,462 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK], DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]) is bad. 2024-12-07T12:21:48,462 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741864_1047 2024-12-07T12:21:48,462 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK] 2024-12-07T12:21:48,464 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38045 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:48,464 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59122 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741865_1048] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data8]'}, localName='127.0.0.1:39315', datanodeUuid='a88a03a9-2854-41f9-b710-2efe6d2391d7', xmitsInProgress=0}:Exception transferring block BP-1950294605-172.17.0.2-1733574086387:blk_1073741865_1048 to mirror 127.0.0.1:38045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T12:21:48,464 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK], DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]) is bad. 2024-12-07T12:21:48,465 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741865_1048 2024-12-07T12:21:48,465 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59122 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741865_1048] {}] datanode.BlockReceiver(316): Block 1073741865 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T12:21:48,465 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59122 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741865_1048] {}] datanode.DataXceiver(331): 127.0.0.1:39315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59122 dst: /127.0.0.1:39315 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:48,465 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK] 2024-12-07T12:21:48,467 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42901 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:21:48,467 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59136 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data8]'}, localName='127.0.0.1:39315', datanodeUuid='a88a03a9-2854-41f9-b710-2efe6d2391d7', xmitsInProgress=0}:Exception transferring block BP-1950294605-172.17.0.2-1733574086387:blk_1073741866_1049 to mirror 127.0.0.1:42901 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:48,467 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK], DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]) is bad. 2024-12-07T12:21:48,467 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741866_1049 2024-12-07T12:21:48,467 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59136 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T12:21:48,467 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59136 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:39315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59136 dst: /127.0.0.1:39315 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:48,467 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK] 2024-12-07T12:21:48,468 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:48,468 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK], DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]) is bad. 
2024-12-07T12:21:48,468 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741867_1050 2024-12-07T12:21:48,469 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK] 2024-12-07T12:21:48,469 WARN [IPC Server handler 2 on default port 45085 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T12:21:48,469 WARN [IPC Server handler 2 on default port 45085 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T12:21:48,469 WARN [IPC Server handler 2 on default port 45085 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T12:21:48,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741868_1051 (size=17994) 2024-12-07T12:21:48,645 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6591b54a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39315, datanodeUuid=a88a03a9-2854-41f9-b710-2efe6d2391d7, infoPort=41629, infoSecurePort=0, ipcPort=37835, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387):Failed to transfer BP-1950294605-172.17.0.2-1733574086387:blk_1073741848_1031 to 127.0.0.1:38045 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T12:21:48,645 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6b564788[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39315, datanodeUuid=a88a03a9-2854-41f9-b710-2efe6d2391d7, infoPort=41629, infoSecurePort=0, ipcPort=37835, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387):Failed to transfer BP-1950294605-172.17.0.2-1733574086387:blk_1073741853_1036 to 127.0.0.1:42901 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:48,880 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/c481908d9af2431e921f3faa8df6bb4c as hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/c481908d9af2431e921f3faa8df6bb4c 2024-12-07T12:21:48,887 INFO [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a3789a04774a73dc51c7f9b2ac34edb6/info of a3789a04774a73dc51c7f9b2ac34edb6 into c481908d9af2431e921f3faa8df6bb4c(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T12:21:48,887 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a3789a04774a73dc51c7f9b2ac34edb6: 2024-12-07T12:21:48,887 INFO [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6., storeName=a3789a04774a73dc51c7f9b2ac34edb6/info, priority=13, startTime=1733574108444; duration=0sec 2024-12-07T12:21:48,887 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-07T12:21:48,887 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:21:48,887 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/c481908d9af2431e921f3faa8df6bb4c because midkey is the same as first or last row 2024-12-07T12:21:48,887 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-07T12:21:48,887 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:21:48,887 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/c481908d9af2431e921f3faa8df6bb4c because midkey is the same as first or last row 2024-12-07T12:21:48,887 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-07T12:21:48,887 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:21:48,887 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/c481908d9af2431e921f3faa8df6bb4c because midkey is the same as first or last row 2024-12-07T12:21:48,887 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T12:21:48,887 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a3789a04774a73dc51c7f9b2ac34edb6:info 2024-12-07T12:21:49,061 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:49,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46855 {}] regionserver.HRegion(8855): Flush requested on a3789a04774a73dc51c7f9b2ac34edb6 2024-12-07T12:21:49,429 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a3789a04774a73dc51c7f9b2ac34edb6 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-07T12:21:49,433 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/fbe2a0a8a6dd49a3a000ff30bac778a7 is 1079, key is tmprow/info:/1733574109427/Put/seqid=0 2024-12-07T12:21:49,435 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:49,435 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK], DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]) is bad. 2024-12-07T12:21:49,435 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741869_1052 2024-12-07T12:21:49,435 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK] 2024-12-07T12:21:49,436 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:49,437 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK], DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]) is bad. 2024-12-07T12:21:49,437 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741870_1053 2024-12-07T12:21:49,437 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK] 2024-12-07T12:21:49,439 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38045 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:49,439 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59154 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data8]'}, localName='127.0.0.1:39315', datanodeUuid='a88a03a9-2854-41f9-b710-2efe6d2391d7', xmitsInProgress=0}:Exception transferring block BP-1950294605-172.17.0.2-1733574086387:blk_1073741871_1054 to mirror 127.0.0.1:38045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:49,439 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK], DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]) is bad. 2024-12-07T12:21:49,439 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741871_1054 2024-12-07T12:21:49,439 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59154 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T12:21:49,439 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59154 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:39315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59154 dst: /127.0.0.1:39315 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:49,440 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK] 2024-12-07T12:21:49,442 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42901 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:49,442 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59156 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741872_1055] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data8]'}, localName='127.0.0.1:39315', datanodeUuid='a88a03a9-2854-41f9-b710-2efe6d2391d7', xmitsInProgress=0}:Exception transferring block BP-1950294605-172.17.0.2-1733574086387:blk_1073741872_1055 to mirror 127.0.0.1:42901 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:49,442 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK], DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]) is bad. 2024-12-07T12:21:49,442 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59156 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741872_1055] {}] datanode.BlockReceiver(316): Block 1073741872 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T12:21:49,442 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741872_1055 2024-12-07T12:21:49,442 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:59156 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741872_1055] {}] datanode.DataXceiver(331): 127.0.0.1:39315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59156 dst: /127.0.0.1:39315 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:49,442 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK] 2024-12-07T12:21:49,443 WARN [IPC Server handler 1 on default port 45085 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T12:21:49,443 WARN [IPC Server handler 1 on default port 45085 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T12:21:49,443 WARN [IPC Server handler 1 on default port 45085 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T12:21:49,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741873_1056 (size=6027) 2024-12-07T12:21:49,619 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:21:49,846 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/fbe2a0a8a6dd49a3a000ff30bac778a7 2024-12-07T12:21:49,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/fbe2a0a8a6dd49a3a000ff30bac778a7 as hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/fbe2a0a8a6dd49a3a000ff30bac778a7 2024-12-07T12:21:49,857 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/fbe2a0a8a6dd49a3a000ff30bac778a7, entries=1, sequenceid=45, filesize=5.9 K 2024-12-07T12:21:49,858 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for a3789a04774a73dc51c7f9b2ac34edb6 in 430ms, sequenceid=45, compaction requested=false 2024-12-07T12:21:49,858 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a3789a04774a73dc51c7f9b2ac34edb6: 2024-12-07T12:21:49,859 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-12-07T12:21:49,859 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:21:49,859 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/c481908d9af2431e921f3faa8df6bb4c because midkey is the same as first or last row 2024-12-07T12:21:49,974 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:49,974 WARN [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK]] 2024-12-07T12:21:49,974 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 27c6fcd7dac8%2C46855%2C1733574087478:(num 1733574107956) roll requested 2024-12-07T12:21:49,974 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C46855%2C1733574087478.1733574109974 2024-12-07T12:21:49,977 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:49,977 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK], DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]) is bad. 2024-12-07T12:21:49,977 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741874_1057 2024-12-07T12:21:49,978 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK] 2024-12-07T12:21:49,979 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:21:49,979 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK], DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]) is bad. 2024-12-07T12:21:49,979 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741875_1058 2024-12-07T12:21:49,979 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK] 2024-12-07T12:21:49,980 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:49,980 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK], DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]) is bad. 2024-12-07T12:21:49,980 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741876_1059 2024-12-07T12:21:49,981 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK] 2024-12-07T12:21:49,982 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:49,982 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK], DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]) is bad. 2024-12-07T12:21:49,982 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741877_1060 2024-12-07T12:21:49,982 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK] 2024-12-07T12:21:49,983 WARN [IPC Server handler 1 on default port 45085 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T12:21:49,983 WARN [IPC Server handler 1 on default port 45085 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T12:21:49,983 WARN [IPC Server handler 1 on default port 45085 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T12:21:49,985 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:49,985 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:49,986 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:49,986 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:49,986 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:49,986 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574107956 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574109974 2024-12-07T12:21:49,986 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41629:41629)] 2024-12-07T12:21:49,987 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574088425 is not closed yet, will try archiving it next time 2024-12-07T12:21:49,987 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574107956 is not closed yet, will try archiving it next time 2024-12-07T12:21:49,987 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574103942 to hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/oldWALs/27c6fcd7dac8%2C46855%2C1733574087478.1733574103942 2024-12-07T12:21:49,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741858_1041 (size=13591) 2024-12-07T12:21:50,388 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574088425 is not closed yet, will try archiving it next time 2024-12-07T12:21:50,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46855 {}] regionserver.HRegion(8855): Flush requested on a3789a04774a73dc51c7f9b2ac34edb6 2024-12-07T12:21:50,845 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a3789a04774a73dc51c7f9b2ac34edb6 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-07T12:21:50,850 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/68bdbc12302b4306b77a2506cde781e8 is 1079, key is tmprow/info:/1733574110844/Put/seqid=0 2024-12-07T12:21:50,852 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:21:50,852 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK], DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]) is bad. 2024-12-07T12:21:50,852 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741879_1062 2024-12-07T12:21:50,853 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK] 2024-12-07T12:21:50,854 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:50,854 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK], DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]) is bad. 2024-12-07T12:21:50,854 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741880_1063 2024-12-07T12:21:50,855 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK] 2024-12-07T12:21:50,857 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42901 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:21:50,857 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:43704 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741881_1064] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data8]'}, localName='127.0.0.1:39315', datanodeUuid='a88a03a9-2854-41f9-b710-2efe6d2391d7', xmitsInProgress=0}:Exception transferring block BP-1950294605-172.17.0.2-1733574086387:blk_1073741881_1064 to mirror 127.0.0.1:42901 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:50,857 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK], DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]) is bad. 2024-12-07T12:21:50,857 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741881_1064 2024-12-07T12:21:50,857 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:43704 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741881_1064] {}] datanode.BlockReceiver(316): Block 1073741881 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T12:21:50,857 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:43704 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741881_1064] {}] datanode.DataXceiver(331): 127.0.0.1:39315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43704 dst: /127.0.0.1:39315 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:50,858 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK] 2024-12-07T12:21:50,860 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36957 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:50,860 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:43710 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741882_1065] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data8]'}, localName='127.0.0.1:39315', datanodeUuid='a88a03a9-2854-41f9-b710-2efe6d2391d7', xmitsInProgress=0}:Exception transferring block BP-1950294605-172.17.0.2-1733574086387:blk_1073741882_1065 to mirror 127.0.0.1:36957 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T12:21:50,860 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK], DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]) is bad. 2024-12-07T12:21:50,860 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741882_1065 2024-12-07T12:21:50,860 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:43710 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741882_1065] {}] datanode.BlockReceiver(316): Block 1073741882 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T12:21:50,860 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:43710 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741882_1065] {}] datanode.DataXceiver(331): 127.0.0.1:39315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43710 dst: /127.0.0.1:39315 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T12:21:50,860 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK] 2024-12-07T12:21:50,861 WARN [IPC Server handler 2 on default port 45085 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T12:21:50,861 WARN [IPC Server handler 2 on default port 45085 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T12:21:50,861 WARN [IPC Server handler 2 on default port 45085 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T12:21:50,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741883_1066 (size=6027) 2024-12-07T12:21:51,062 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:21:51,265 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/68bdbc12302b4306b77a2506cde781e8 2024-12-07T12:21:51,271 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/68bdbc12302b4306b77a2506cde781e8 as hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/68bdbc12302b4306b77a2506cde781e8 2024-12-07T12:21:51,276 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/68bdbc12302b4306b77a2506cde781e8, entries=1, sequenceid=55, filesize=5.9 K 2024-12-07T12:21:51,278 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for a3789a04774a73dc51c7f9b2ac34edb6 in 432ms, sequenceid=55, compaction requested=true 2024-12-07T12:21:51,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a3789a04774a73dc51c7f9b2ac34edb6: 2024-12-07T12:21:51,278 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-12-07T12:21:51,278 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:21:51,278 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/c481908d9af2431e921f3faa8df6bb4c because midkey is the same as first or last row 2024-12-07T12:21:51,278 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a3789a04774a73dc51c7f9b2ac34edb6:info, priority=-2147483648, current under compaction store size is 1 2024-12-07T12:21:51,278 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T12:21:51,278 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T12:21:51,279 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T12:21:51,279 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.HStore(1541): a3789a04774a73dc51c7f9b2ac34edb6/info is initiating minor compaction (all files) 2024-12-07T12:21:51,279 INFO [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
a3789a04774a73dc51c7f9b2ac34edb6/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6. 2024-12-07T12:21:51,279 INFO [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/c481908d9af2431e921f3faa8df6bb4c, hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/fbe2a0a8a6dd49a3a000ff30bac778a7, hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/68bdbc12302b4306b77a2506cde781e8] into tmpdir=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp, totalSize=29.3 K 2024-12-07T12:21:51,280 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] compactions.Compactor(225): Compacting c481908d9af2431e921f3faa8df6bb4c, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733574101949 2024-12-07T12:21:51,280 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] compactions.Compactor(225): Compacting fbe2a0a8a6dd49a3a000ff30bac778a7, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1733574109427 2024-12-07T12:21:51,281 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] compactions.Compactor(225): Compacting 68bdbc12302b4306b77a2506cde781e8, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733574110844 2024-12-07T12:21:51,294 INFO [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a3789a04774a73dc51c7f9b2ac34edb6#info#compaction#24 average throughput is 12.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T12:21:51,295 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/10e3103fb08844c281da27ad9139378a is 1080, key is row0002/info:/1733574101949/Put/seqid=0 2024-12-07T12:21:51,296 WARN [Thread-975 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:51,297 WARN [Thread-975 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK], DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]) is bad. 2024-12-07T12:21:51,297 WARN [Thread-975 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741884_1067 2024-12-07T12:21:51,297 WARN [Thread-975 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42901,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK] 2024-12-07T12:21:51,298 WARN [Thread-975 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:51,298 WARN [Thread-975 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK], DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]) is bad. 2024-12-07T12:21:51,298 WARN [Thread-975 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741885_1068 2024-12-07T12:21:51,299 WARN [Thread-975 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK] 2024-12-07T12:21:51,300 WARN [Thread-975 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:51,300 WARN [Thread-975 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK], DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]) is bad. 2024-12-07T12:21:51,300 WARN [Thread-975 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741886_1069 2024-12-07T12:21:51,300 WARN [Thread-975 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK] 2024-12-07T12:21:51,302 WARN [Thread-975 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36957 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:51,302 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:43734 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741887_1070] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data8]'}, localName='127.0.0.1:39315', datanodeUuid='a88a03a9-2854-41f9-b710-2efe6d2391d7', xmitsInProgress=0}:Exception transferring block BP-1950294605-172.17.0.2-1733574086387:blk_1073741887_1070 to mirror 127.0.0.1:36957 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:51,302 WARN [Thread-975 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK], DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]) is bad. 2024-12-07T12:21:51,302 WARN [Thread-975 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741887_1070 2024-12-07T12:21:51,302 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:43734 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741887_1070] {}] datanode.BlockReceiver(316): Block 1073741887 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T12:21:51,302 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:43734 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741887_1070] {}] datanode.DataXceiver(331): 127.0.0.1:39315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43734 dst: /127.0.0.1:39315 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T12:21:51,303 WARN [Thread-975 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK] 2024-12-07T12:21:51,303 WARN [IPC Server handler 4 on default port 45085 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T12:21:51,303 WARN [IPC Server handler 4 on default port 45085 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T12:21:51,303 WARN [IPC Server handler 4 on default port 45085 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T12:21:51,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741888_1071 (size=18097) 2024-12-07T12:21:51,619 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:51,645 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6b564788[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39315, datanodeUuid=a88a03a9-2854-41f9-b710-2efe6d2391d7, infoPort=41629, infoSecurePort=0, ipcPort=37835, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387):Failed to transfer BP-1950294605-172.17.0.2-1733574086387:blk_1073741843_1026 to 127.0.0.1:42901 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:51,645 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6591b54a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39315, datanodeUuid=a88a03a9-2854-41f9-b710-2efe6d2391d7, infoPort=41629, infoSecurePort=0, ipcPort=37835, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387):Failed to transfer BP-1950294605-172.17.0.2-1733574086387:blk_1073741863_1046 to 127.0.0.1:38045 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:51,714 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/10e3103fb08844c281da27ad9139378a as hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/10e3103fb08844c281da27ad9139378a 2024-12-07T12:21:51,720 INFO [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a3789a04774a73dc51c7f9b2ac34edb6/info of a3789a04774a73dc51c7f9b2ac34edb6 into 10e3103fb08844c281da27ad9139378a(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T12:21:51,720 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a3789a04774a73dc51c7f9b2ac34edb6: 2024-12-07T12:21:51,720 INFO [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6., storeName=a3789a04774a73dc51c7f9b2ac34edb6/info, priority=13, startTime=1733574111278; duration=0sec 2024-12-07T12:21:51,721 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-12-07T12:21:51,721 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:21:51,721 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/10e3103fb08844c281da27ad9139378a because midkey is the same as first or last row 2024-12-07T12:21:51,721 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-12-07T12:21:51,721 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:21:51,721 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/10e3103fb08844c281da27ad9139378a because midkey is the same as first or last row 2024-12-07T12:21:51,721 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-12-07T12:21:51,721 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:21:51,721 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/10e3103fb08844c281da27ad9139378a because midkey is the same as first or last row 2024-12-07T12:21:51,721 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T12:21:51,721 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a3789a04774a73dc51c7f9b2ac34edb6:info 2024-12-07T12:21:51,987 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:51,987 WARN [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-12-07T12:21:52,067 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:21:52,071 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:21:52,072 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:21:52,072 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:21:52,072 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T12:21:52,072 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61815e22{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:21:52,073 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7177a9b6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:21:52,186 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@39eaf0e6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/java.io.tmpdir/jetty-localhost-45051-hadoop-hdfs-3_4_1-tests_jar-_-any-11552998941169183084/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:21:52,186 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1542e930{HTTP/1.1, (http/1.1)}{localhost:45051} 2024-12-07T12:21:52,187 INFO [Time-limited test {}] server.Server(415): Started @129604ms 2024-12-07T12:21:52,188 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T12:21:52,291 WARN [Thread-994 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T12:21:52,301 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe88a2952ec50bcb7 with lease ID 0xbd7254818a6a0030: from storage DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb node DatanodeRegistration(127.0.0.1:39611, datanodeUuid=92cd3207-d7db-45e7-bcec-7240b42e4324, infoPort=32927, infoSecurePort=0, ipcPort=36645, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T12:21:52,301 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe88a2952ec50bcb7 with lease ID 0xbd7254818a6a0030: from storage DS-ceee7fcf-0713-4f8f-8cd6-4526b75824fc node DatanodeRegistration(127.0.0.1:39611, datanodeUuid=92cd3207-d7db-45e7-bcec-7240b42e4324, infoPort=32927, infoSecurePort=0, ipcPort=36645, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:21:52,644 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6591b54a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39315, datanodeUuid=a88a03a9-2854-41f9-b710-2efe6d2391d7, infoPort=41629, infoSecurePort=0, ipcPort=37835, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387):Failed to transfer BP-1950294605-172.17.0.2-1733574086387:blk_1073741868_1051 to 127.0.0.1:34911 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:52,644 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6b564788[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39315, datanodeUuid=a88a03a9-2854-41f9-b710-2efe6d2391d7, infoPort=41629, infoSecurePort=0, ipcPort=37835, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387):Failed to transfer BP-1950294605-172.17.0.2-1733574086387:blk_1073741873_1056 to 127.0.0.1:34911 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:53,062 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:53,619 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:53,987 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:54,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39611 is added to blk_1073741883_1066 (size=6027) 2024-12-07T12:21:54,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39611 is added to blk_1073741858_1041 (size=13591) 2024-12-07T12:21:55,063 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:55,620 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:55,645 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6b564788[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39315, datanodeUuid=a88a03a9-2854-41f9-b710-2efe6d2391d7, infoPort=41629, infoSecurePort=0, ipcPort=37835, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387):Failed to transfer BP-1950294605-172.17.0.2-1733574086387:blk_1073741888_1071 to 127.0.0.1:36957 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:55,988 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:57,063 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. 
Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:57,349 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T12:21:57,620 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:57,988 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:58,141 ERROR [FSHLog-0-hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData-prefix:27c6fcd7dac8,43989,1733574087391 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:21:58,141 WARN [FSHLog-0-hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData-prefix:27c6fcd7dac8,43989,1733574087391 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:58,141 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 27c6fcd7dac8%2C43989%2C1733574087391:(num 1733574088058) roll requested 2024-12-07T12:21:58,142 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C43989%2C1733574087391.1733574118142 2024-12-07T12:21:58,145 WARN [Thread-1016 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1072 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:58,145 WARN [Thread-1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741889_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK], DatanodeInfoWithStorage[127.0.0.1:39611,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]) is bad. 2024-12-07T12:21:58,145 WARN [Thread-1016 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741889_1072 2024-12-07T12:21:58,145 WARN [Thread-1016 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK] 2024-12-07T12:21:58,148 WARN [Thread-1016 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1073 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36957 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:58,148 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-893402062_22 at /127.0.0.1:57136 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741890_1073] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data4]'}, localName='127.0.0.1:39611', datanodeUuid='92cd3207-d7db-45e7-bcec-7240b42e4324', xmitsInProgress=0}:Exception transferring block BP-1950294605-172.17.0.2-1733574086387:blk_1073741890_1073 to mirror 127.0.0.1:36957 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:58,148 WARN [Thread-1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741890_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39611,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK], DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]) is bad. 2024-12-07T12:21:58,148 WARN [Thread-1016 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741890_1073 2024-12-07T12:21:58,148 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-893402062_22 at /127.0.0.1:57136 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741890_1073] {}] datanode.BlockReceiver(316): Block 1073741890 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-12-07T12:21:58,148 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-893402062_22 at /127.0.0.1:57136 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741890_1073] {}] datanode.DataXceiver(331): 127.0.0.1:39611:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57136 dst: /127.0.0.1:39611 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:21:58,148 WARN [Thread-1016 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK] 2024-12-07T12:21:58,149 WARN [Thread-1016 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1074 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:58,150 WARN [Thread-1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741891_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK], DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]) is bad. 
2024-12-07T12:21:58,150 WARN [Thread-1016 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741891_1074 2024-12-07T12:21:58,150 WARN [Thread-1016 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK] 2024-12-07T12:21:58,154 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:58,154 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:58,154 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:58,154 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:58,154 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:21:58,154 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/WALs/27c6fcd7dac8,43989,1733574087391/27c6fcd7dac8%2C43989%2C1733574087391.1733574088058 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/WALs/27c6fcd7dac8,43989,1733574087391/27c6fcd7dac8%2C43989%2C1733574087391.1733574118142 2024-12-07T12:21:58,155 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:58,155 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:21:58,155 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/WALs/27c6fcd7dac8,43989,1733574087391/27c6fcd7dac8%2C43989%2C1733574087391.1733574088058 2024-12-07T12:21:58,155 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41629:41629),(127.0.0.1/127.0.0.1:32927:32927)] 2024-12-07T12:21:58,155 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/WALs/27c6fcd7dac8,43989,1733574087391/27c6fcd7dac8%2C43989%2C1733574087391.1733574088058 is not closed yet, will try archiving it next time 2024-12-07T12:21:58,155 WARN [IPC Server handler 0 on default port 45085 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/WALs/27c6fcd7dac8,43989,1733574087391/27c6fcd7dac8%2C43989%2C1733574087391.1733574088058 has not been closed. Lease recovery is in progress. RecoveryId = 1076 for block blk_1073741830_1006 2024-12-07T12:21:58,156 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/WALs/27c6fcd7dac8,43989,1733574087391/27c6fcd7dac8%2C43989%2C1733574087391.1733574088058 after 1ms 2024-12-07T12:21:59,063 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:21:59,988 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:01,064 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:01,989 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:02,157 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/WALs/27c6fcd7dac8,43989,1733574087391/27c6fcd7dac8%2C43989%2C1733574087391.1733574088058 after 4002ms 2024-12-07T12:22:02,316 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@22a46c2d {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1950294605-172.17.0.2-1733574086387:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:38045,null,null]) java.net.ConnectException: Call From 27c6fcd7dac8/172.17.0.2 to localhost:35217 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-07T12:22:02,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39611 is added to blk_1073741833_1021 (size=455) 2024-12-07T12:22:02,962 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574088425 to hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/oldWALs/27c6fcd7dac8%2C46855%2C1733574087478.1733574088425 2024-12-07T12:22:02,963 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574107956 to hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/oldWALs/27c6fcd7dac8%2C46855%2C1733574087478.1733574107956 2024-12-07T12:22:03,064 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:03,295 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@434f4a22[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39611, datanodeUuid=92cd3207-d7db-45e7-bcec-7240b42e4324, infoPort=32927, infoSecurePort=0, ipcPort=36645, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387):Failed to transfer BP-1950294605-172.17.0.2-1733574086387:blk_1073741833_1021 to 127.0.0.1:36957 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:03,989 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:05,064 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:22:05,501 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C46855%2C1733574087478.1733574125501 2024-12-07T12:22:05,505 WARN [Thread-1028 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1077 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34911 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:05,505 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-893402062_22 at /127.0.0.1:58506 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741893_1077] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data8]'}, localName='127.0.0.1:39315', datanodeUuid='a88a03a9-2854-41f9-b710-2efe6d2391d7', xmitsInProgress=0}:Exception transferring block BP-1950294605-172.17.0.2-1733574086387:blk_1073741893_1077 to mirror 127.0.0.1:34911 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:05,505 WARN [Thread-1028 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741893_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK], DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]) is bad. 
2024-12-07T12:22:05,505 WARN [Thread-1028 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741893_1077 2024-12-07T12:22:05,505 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-893402062_22 at /127.0.0.1:58506 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741893_1077] {}] datanode.BlockReceiver(316): Block 1073741893 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-07T12:22:05,505 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-893402062_22 at /127.0.0.1:58506 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741893_1077] {}] datanode.DataXceiver(331): 127.0.0.1:39315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58506 dst: /127.0.0.1:39315 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:05,506 WARN [Thread-1028 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK] 2024-12-07T12:22:05,507 WARN [Thread-1028 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1078 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36957 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:22:05,507 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-893402062_22 at /127.0.0.1:58508 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741894_1078] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data8]'}, localName='127.0.0.1:39315', datanodeUuid='a88a03a9-2854-41f9-b710-2efe6d2391d7', xmitsInProgress=0}:Exception transferring block BP-1950294605-172.17.0.2-1733574086387:blk_1073741894_1078 to mirror 127.0.0.1:36957 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:05,508 WARN [Thread-1028 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741894_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK], DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]) is bad. 2024-12-07T12:22:05,508 WARN [Thread-1028 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741894_1078 2024-12-07T12:22:05,508 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-893402062_22 at /127.0.0.1:58508 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741894_1078] {}] datanode.BlockReceiver(316): Block 1073741894 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-07T12:22:05,508 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-893402062_22 at /127.0.0.1:58508 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741894_1078] {}] datanode.DataXceiver(331): 127.0.0.1:39315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58508 dst: /127.0.0.1:39315 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:05,508 WARN [Thread-1028 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK] 2024-12-07T12:22:05,512 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:05,512 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:05,512 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:05,512 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:05,512 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:05,512 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574109974 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574125501 2024-12-07T12:22:05,513 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32927:32927),(127.0.0.1/127.0.0.1:41629:41629)] 2024-12-07T12:22:05,513 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574109974 is not closed yet, will try archiving it next time 2024-12-07T12:22:05,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741878_1061 (size=12911) 2024-12-07T12:22:05,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46855 {}] regionserver.HRegion(8855): Flush requested on a3789a04774a73dc51c7f9b2ac34edb6 2024-12-07T12:22:05,518 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a3789a04774a73dc51c7f9b2ac34edb6 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-07T12:22:05,523 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/090e08c3f3a2434ead0eb38182977de4 is 1080, key is row0013/info:/1733574125514/Put/seqid=0 2024-12-07T12:22:05,525 WARN [Thread-1036 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1080 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34911 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:05,525 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:58530 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741896_1080] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data8]'}, localName='127.0.0.1:39315', datanodeUuid='a88a03a9-2854-41f9-b710-2efe6d2391d7', xmitsInProgress=0}:Exception transferring block BP-1950294605-172.17.0.2-1733574086387:blk_1073741896_1080 to mirror 127.0.0.1:34911 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:05,526 WARN [Thread-1036 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741896_1080 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK], DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]) is bad. 2024-12-07T12:22:05,526 WARN [Thread-1036 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741896_1080 2024-12-07T12:22:05,526 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:58530 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741896_1080] {}] datanode.BlockReceiver(316): Block 1073741896 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T12:22:05,526 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:58530 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741896_1080] {}] datanode.DataXceiver(331): 127.0.0.1:39315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58530 dst: /127.0.0.1:39315 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:05,526 WARN [Thread-1036 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK] 2024-12-07T12:22:05,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39611 is added to blk_1073741897_1081 (size=8190) 2024-12-07T12:22:05,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741897_1081 (size=8190) 2024-12-07T12:22:05,532 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/090e08c3f3a2434ead0eb38182977de4 2024-12-07T12:22:05,543 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/090e08c3f3a2434ead0eb38182977de4 as hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/090e08c3f3a2434ead0eb38182977de4 2024-12-07T12:22:05,549 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/090e08c3f3a2434ead0eb38182977de4, entries=3, sequenceid=66, filesize=8.0 K 2024-12-07T12:22:05,551 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for a3789a04774a73dc51c7f9b2ac34edb6 in 32ms, sequenceid=66, compaction requested=false 2024-12-07T12:22:05,551 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a3789a04774a73dc51c7f9b2ac34edb6: 2024-12-07T12:22:05,551 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-12-07T12:22:05,551 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:22:05,551 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/10e3103fb08844c281da27ad9139378a because midkey is the same as first or last row 2024-12-07T12:22:05,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46855 {}] regionserver.HRegion(8855): Flush requested on a3789a04774a73dc51c7f9b2ac34edb6 2024-12-07T12:22:05,738 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a3789a04774a73dc51c7f9b2ac34edb6 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-12-07T12:22:05,743 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/72709d89cbf447ff98ea3bbe53adf35a is 1080, key is row0015/info:/1733574125519/Put/seqid=0 2024-12-07T12:22:05,745 WARN [Thread-1045 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1082 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36957 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:05,745 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:48856 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741898_1082] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data4]'}, localName='127.0.0.1:39611', datanodeUuid='92cd3207-d7db-45e7-bcec-7240b42e4324', xmitsInProgress=0}:Exception transferring block BP-1950294605-172.17.0.2-1733574086387:blk_1073741898_1082 to mirror 127.0.0.1:36957 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:05,745 WARN [Thread-1045 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741898_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39611,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK], DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]) is bad. 2024-12-07T12:22:05,745 WARN [Thread-1045 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741898_1082 2024-12-07T12:22:05,745 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:48856 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741898_1082] {}] datanode.BlockReceiver(316): Block 1073741898 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T12:22:05,745 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:48856 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741898_1082] {}] datanode.DataXceiver(331): 127.0.0.1:39611:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48856 dst: /127.0.0.1:39611 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T12:22:05,746 WARN [Thread-1045 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK] 2024-12-07T12:22:05,748 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:48860 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741899_1083] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data4]'}, localName='127.0.0.1:39611', datanodeUuid='92cd3207-d7db-45e7-bcec-7240b42e4324', xmitsInProgress=0}:Exception transferring block BP-1950294605-172.17.0.2-1733574086387:blk_1073741899_1083 to mirror 127.0.0.1:34911 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:05,748 WARN [Thread-1045 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1083 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34911 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:05,748 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:48860 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741899_1083] {}] datanode.BlockReceiver(316): Block 1073741899 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-12-07T12:22:05,748 WARN [Thread-1045 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741899_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39611,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK], DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]) is bad. 2024-12-07T12:22:05,748 WARN [Thread-1045 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741899_1083 2024-12-07T12:22:05,748 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:48860 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741899_1083] {}] datanode.DataXceiver(331): 127.0.0.1:39611:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48860 dst: /127.0.0.1:39611 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:05,748 WARN [Thread-1045 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK] 2024-12-07T12:22:05,749 WARN [Thread-1045 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1084 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:22:05,749 WARN [Thread-1045 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741900_1084 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK], DatanodeInfoWithStorage[127.0.0.1:39611,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]) is bad. 2024-12-07T12:22:05,749 WARN [Thread-1045 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741900_1084 2024-12-07T12:22:05,750 WARN [Thread-1045 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK] 2024-12-07T12:22:05,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39611 is added to blk_1073741901_1085 (size=14660) 2024-12-07T12:22:05,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741901_1085 (size=14660) 2024-12-07T12:22:05,755 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/72709d89cbf447ff98ea3bbe53adf35a 2024-12-07T12:22:05,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/72709d89cbf447ff98ea3bbe53adf35a as hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/72709d89cbf447ff98ea3bbe53adf35a 2024-12-07T12:22:05,765 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/72709d89cbf447ff98ea3bbe53adf35a, entries=9, sequenceid=79, filesize=14.3 K 2024-12-07T12:22:05,766 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10758, heapSize ~11.48 KB/11760, currentSize=0 B/0 for a3789a04774a73dc51c7f9b2ac34edb6 in 28ms, sequenceid=79, compaction requested=true 2024-12-07T12:22:05,767 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a3789a04774a73dc51c7f9b2ac34edb6: 2024-12-07T12:22:05,767 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.0 K, sizeToCheck=16.0 K 2024-12-07T12:22:05,767 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:22:05,767 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/10e3103fb08844c281da27ad9139378a because midkey is the same as first or last row 2024-12-07T12:22:05,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
a3789a04774a73dc51c7f9b2ac34edb6:info, priority=-2147483648, current under compaction store size is 1 2024-12-07T12:22:05,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T12:22:05,767 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T12:22:05,768 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40947 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T12:22:05,768 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.HStore(1541): a3789a04774a73dc51c7f9b2ac34edb6/info is initiating minor compaction (all files) 2024-12-07T12:22:05,768 INFO [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a3789a04774a73dc51c7f9b2ac34edb6/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6. 2024-12-07T12:22:05,768 INFO [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/10e3103fb08844c281da27ad9139378a, hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/090e08c3f3a2434ead0eb38182977de4, hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/72709d89cbf447ff98ea3bbe53adf35a] into tmpdir=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp, totalSize=40.0 K 2024-12-07T12:22:05,768 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] compactions.Compactor(225): Compacting 10e3103fb08844c281da27ad9139378a, keycount=12, bloomtype=ROW, size=17.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733574101949 2024-12-07T12:22:05,769 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] compactions.Compactor(225): Compacting 090e08c3f3a2434ead0eb38182977de4, keycount=3, bloomtype=ROW, size=8.0 K, encoding=NONE, compression=NONE, seqNum=66, earliestPutTs=1733574111856 2024-12-07T12:22:05,769 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] compactions.Compactor(225): Compacting 72709d89cbf447ff98ea3bbe53adf35a, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733574125519 2024-12-07T12:22:05,780 INFO [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a3789a04774a73dc51c7f9b2ac34edb6#info#compaction#27 average throughput is 11.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T12:22:05,780 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/bfb8e5fd5d86489aa00a74f6e219e43f is 1080, key is row0002/info:/1733574101949/Put/seqid=0 2024-12-07T12:22:05,783 WARN [Thread-1054 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741902_1086 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36957 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:05,783 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:48892 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741902_1086] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data4]'}, localName='127.0.0.1:39611', datanodeUuid='92cd3207-d7db-45e7-bcec-7240b42e4324', xmitsInProgress=0}:Exception transferring block BP-1950294605-172.17.0.2-1733574086387:blk_1073741902_1086 to mirror 127.0.0.1:36957 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T12:22:05,783 WARN [Thread-1054 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741902_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39611,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK], DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]) is bad. 2024-12-07T12:22:05,783 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:48892 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741902_1086] {}] datanode.BlockReceiver(316): Block 1073741902 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T12:22:05,783 WARN [Thread-1054 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741902_1086 2024-12-07T12:22:05,783 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:48892 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741902_1086] {}] datanode.DataXceiver(331): 127.0.0.1:39611:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48892 dst: /127.0.0.1:39611 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
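The DataStreamer warnings above show the write pipeline treating the datanode that refused the connection (127.0.0.1:36957) as bad, abandoning the block, and retrying with the remaining nodes excluded. A toy sketch of that exclude-and-retry pattern follows; the `BlockAllocator` interface and its method names are assumptions made for the sketch, not the HDFS client API.

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    // Toy exclude-and-retry loop in the spirit of DataStreamer pipeline recovery:
    // on a failed write, remember the bad node, abandon the attempt, and ask for
    // a fresh pipeline that avoids every node excluded so far.
    public class PipelineRetrySketch {
      interface BlockAllocator {
        List<String> allocatePipeline(Set<String> excludedNodes) throws IOException;
        void writeThrough(List<String> pipeline) throws IOException; // may fail on a bad node
      }

      static void writeWithRecovery(BlockAllocator allocator, int maxRetries) throws IOException {
        Set<String> excluded = new HashSet<>();
        for (int attempt = 0; attempt <= maxRetries; attempt++) {
          List<String> pipeline = allocator.allocatePipeline(excluded);
          try {
            allocator.writeThrough(pipeline);
            return; // success
          } catch (IOException e) {
            // The real client learns the first bad link from the ack; here we
            // simply exclude the last node of the attempted pipeline.
            String bad = pipeline.get(pipeline.size() - 1);
            excluded.add(bad);
            System.out.println("Excluding datanode " + bad + " and retrying: " + e.getMessage());
          }
        }
        throw new IOException("All datanodes are bad. Aborting...");
      }

      public static void main(String[] args) throws IOException {
        List<String> nodes = new ArrayList<>(List.of("127.0.0.1:39611", "127.0.0.1:36957"));
        writeWithRecovery(new BlockAllocator() {
          public List<String> allocatePipeline(Set<String> excludedNodes) {
            List<String> p = new ArrayList<>(nodes);
            p.removeAll(excludedNodes);
            return p;
          }
          public void writeThrough(List<String> pipeline) throws IOException {
            if (pipeline.contains("127.0.0.1:36957")) throw new IOException("Connection refused");
          }
        }, 2);
        System.out.println("write succeeded");
      }
    }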
2024-12-07T12:22:05,783 WARN [Thread-1054 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK] 2024-12-07T12:22:05,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741903_1087 (size=28989) 2024-12-07T12:22:05,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39611 is added to blk_1073741903_1087 (size=28989) 2024-12-07T12:22:05,793 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/bfb8e5fd5d86489aa00a74f6e219e43f as hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/bfb8e5fd5d86489aa00a74f6e219e43f 2024-12-07T12:22:05,799 INFO [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a3789a04774a73dc51c7f9b2ac34edb6/info of a3789a04774a73dc51c7f9b2ac34edb6 into bfb8e5fd5d86489aa00a74f6e219e43f(size=28.3 K), total size for store is 28.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T12:22:05,799 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a3789a04774a73dc51c7f9b2ac34edb6: 2024-12-07T12:22:05,799 INFO [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6., storeName=a3789a04774a73dc51c7f9b2ac34edb6/info, priority=13, startTime=1733574125767; duration=0sec 2024-12-07T12:22:05,799 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-12-07T12:22:05,799 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:22:05,799 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/bfb8e5fd5d86489aa00a74f6e219e43f because midkey is the same as first or last row 2024-12-07T12:22:05,799 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-12-07T12:22:05,799 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:22:05,800 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/bfb8e5fd5d86489aa00a74f6e219e43f because midkey is the same as first or last row 2024-12-07T12:22:05,800 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] 
regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-12-07T12:22:05,800 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:22:05,800 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/bfb8e5fd5d86489aa00a74f6e219e43f because midkey is the same as first or last row 2024-12-07T12:22:05,800 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T12:22:05,800 DEBUG [RS:0;27c6fcd7dac8:46855-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a3789a04774a73dc51c7f9b2ac34edb6:info 2024-12-07T12:22:05,914 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.1733574109974 to hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/oldWALs/27c6fcd7dac8%2C46855%2C1733574087478.1733574109974 2024-12-07T12:22:05,989 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:05,989 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-12-07T12:22:06,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-07T12:22:06,141 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
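The repeated ConstantSizeRegionSplitPolicy / StoreUtils entries above are the post-compaction split check: the store (28.3 K) exceeds the 16.0 K threshold, but the split is vetoed because the candidate midkey equals the first or last row. A standalone sketch of that two-step decision is below; the byte-array keys in main (other than row0002, which appears in the log) are hypothetical, and the real policies carry much more state.

    import java.util.Arrays;

    // Sketch of the post-compaction split check seen in the log:
    // 1) size test: consider a split only if the store size exceeds the threshold;
    // 2) midkey test: refuse to split if the midkey equals the first or last key,
    //    since that would produce an empty daughter region.
    public class SplitCheckSketch {
      static boolean shouldSplit(long storeSize, long sizeToCheck,
                                 byte[] firstKey, byte[] midKey, byte[] lastKey) {
        if (storeSize < sizeToCheck) {
          return false; // not big enough yet
        }
        if (Arrays.equals(midKey, firstKey) || Arrays.equals(midKey, lastKey)) {
          return false; // "midkey is the same as first or last row"
        }
        return true;
      }

      public static void main(String[] args) {
        byte[] first = "row0002".getBytes();
        byte[] last = "row0030".getBytes();
        // 28.3 K store vs 16.0 K threshold, but the midkey collides with the first row.
        System.out.println(shouldSplit(28 * 1024 + 300, 16 * 1024, first, first, last));               // false
        System.out.println(shouldSplit(28 * 1024 + 300, 16 * 1024, first, "row0015".getBytes(), last)); // true
      }
    }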
2024-12-07T12:22:06,141 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T12:22:06,142 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:22:06,142 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:22:06,142 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-07T12:22:06,142 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-07T12:22:06,142 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1036791079, stopped=false 2024-12-07T12:22:06,142 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=27c6fcd7dac8,43989,1733574087391 2024-12-07T12:22:06,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T12:22:06,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37667-0x1018cdf3d480002, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T12:22:06,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:06,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37667-0x1018cdf3d480002, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:06,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46855-0x1018cdf3d480001, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T12:22:06,144 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T12:22:06,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46855-0x1018cdf3d480001, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:06,144 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
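The ZKWatcher entries above are the cluster-shutdown signal propagating: the master deletes /hbase/running and every process holding a watch on that znode receives a NodeDeleted event. A minimal sketch of setting such a watch with the plain ZooKeeper client follows; the quorum string and znode path come from the log, while the rest is a simplification of what ZKWatcher does, not its actual code.

    import java.io.IOException;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    // Watch a "running" znode and react when it disappears, which is how the
    // shutdown request reaches the region servers in the entries above.
    public class RunningZNodeWatchSketch {
      public static void main(String[] args) throws IOException, KeeperException, InterruptedException {
        String quorum = "127.0.0.1:62922";      // from the log
        String runningZNode = "/hbase/running"; // from the log
        ZooKeeper zk = new ZooKeeper(quorum, 30_000, event -> { });
        Watcher watcher = (WatchedEvent event) -> {
          if (event.getType() == Watcher.Event.EventType.NodeDeleted
              && runningZNode.equals(event.getPath())) {
            System.out.println("Received NodeDeleted for " + runningZNode + "; initiating shutdown");
          }
        };
        // exists() both checks the node and registers the watch for the next change.
        zk.exists(runningZNode, watcher);
        Thread.sleep(60_000);                   // keep the process alive to observe the event
        zk.close();
      }
    }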
2024-12-07T12:22:06,144 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T12:22:06,144 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:22:06,145 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '27c6fcd7dac8,46855,1733574087478' ***** 2024-12-07T12:22:06,145 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T12:22:06,145 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '27c6fcd7dac8,37667,1733574088962' ***** 2024-12-07T12:22:06,145 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T12:22:06,145 INFO [RS:1;27c6fcd7dac8:37667 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T12:22:06,145 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:22:06,145 INFO [RS:0;27c6fcd7dac8:46855 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T12:22:06,145 INFO [RS:1;27c6fcd7dac8:37667 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T12:22:06,145 INFO [RS:1;27c6fcd7dac8:37667 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T12:22:06,145 INFO [RS:0;27c6fcd7dac8:46855 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T12:22:06,145 INFO [RS:1;27c6fcd7dac8:37667 {}] regionserver.HRegionServer(959): stopping server 27c6fcd7dac8,37667,1733574088962 2024-12-07T12:22:06,145 INFO [RS:0;27c6fcd7dac8:46855 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T12:22:06,145 INFO [RS:1;27c6fcd7dac8:37667 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T12:22:06,145 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T12:22:06,145 INFO [RS:1;27c6fcd7dac8:37667 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;27c6fcd7dac8:37667. 
2024-12-07T12:22:06,145 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T12:22:06,145 DEBUG [RS:1;27c6fcd7dac8:37667 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T12:22:06,145 INFO [RS:0;27c6fcd7dac8:46855 {}] regionserver.HRegionServer(3091): Received CLOSE for a3789a04774a73dc51c7f9b2ac34edb6 2024-12-07T12:22:06,145 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46855-0x1018cdf3d480001, quorum=127.0.0.1:62922, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:22:06,145 DEBUG [RS:1;27c6fcd7dac8:37667 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:22:06,146 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37667-0x1018cdf3d480002, quorum=127.0.0.1:62922, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:22:06,146 INFO [RS:1;27c6fcd7dac8:37667 {}] regionserver.HRegionServer(976): stopping server 27c6fcd7dac8,37667,1733574088962; all regions closed. 2024-12-07T12:22:06,146 INFO [RS:0;27c6fcd7dac8:46855 {}] regionserver.HRegionServer(959): stopping server 27c6fcd7dac8,46855,1733574087478 2024-12-07T12:22:06,146 INFO [RS:0;27c6fcd7dac8:46855 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T12:22:06,146 INFO [RS:0;27c6fcd7dac8:46855 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;27c6fcd7dac8:46855. 
2024-12-07T12:22:06,146 DEBUG [RS:0;27c6fcd7dac8:46855 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T12:22:06,146 DEBUG [RS:0;27c6fcd7dac8:46855 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:22:06,146 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a3789a04774a73dc51c7f9b2ac34edb6, disabling compactions & flushes 2024-12-07T12:22:06,146 INFO [RS:0;27c6fcd7dac8:46855 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T12:22:06,146 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6. 2024-12-07T12:22:06,146 INFO [RS:0;27c6fcd7dac8:46855 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T12:22:06,146 INFO [RS:0;27c6fcd7dac8:46855 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T12:22:06,146 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6. 2024-12-07T12:22:06,146 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6. after waiting 0 ms 2024-12-07T12:22:06,146 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6. 
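The CLOSE handling above follows a fixed order: disable further compactions and flushes, take the region's close lock with a time-limited wait, disable updates, then flush whatever is still in the memstore before the region goes offline. A compact ordering sketch with a ReentrantReadWriteLock standing in for the close lock is below; the memstore counter and flush call are placeholders, not HRegion internals.

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Ordering sketch of a region close as it appears in the log: stop background
    // work, acquire the close (write) lock with a bounded wait, block new updates,
    // flush the remaining memstore contents, then report the region closed.
    public class RegionCloseSketch {
      private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
      private final AtomicBoolean writesEnabled = new AtomicBoolean(true);
      private long memstoreBytes = 1075; // ~1.05 KB pending, as in the log

      boolean close(long lockWaitMillis) throws InterruptedException {
        // 1. Caller has already disabled compactions & flushes for the region.
        // 2. Time-limited wait for the close lock.
        if (!closeLock.writeLock().tryLock(lockWaitMillis, TimeUnit.MILLISECONDS)) {
          return false; // could not close within the allowed time
        }
        try {
          writesEnabled.set(false); // 3. Updates disabled for region.
          if (memstoreBytes > 0) {  // 4. Flush remaining edits so nothing is lost.
            System.out.println("Flushing " + memstoreBytes + " bytes before close");
            memstoreBytes = 0;
          }
          return true;              // 5. Region is now closed.
        } finally {
          closeLock.writeLock().unlock();
        }
      }

      public static void main(String[] args) throws InterruptedException {
        System.out.println("closed=" + new RegionCloseSketch().close(0));
      }
    }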
2024-12-07T12:22:06,146 INFO [RS:0;27c6fcd7dac8:46855 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-07T12:22:06,146 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing a3789a04774a73dc51c7f9b2ac34edb6 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-07T12:22:06,147 INFO [RS:0;27c6fcd7dac8:46855 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-07T12:22:06,147 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:06,147 DEBUG [RS:0;27c6fcd7dac8:46855 {}] regionserver.HRegionServer(1325): Online Regions={a3789a04774a73dc51c7f9b2ac34edb6=TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6., 1588230740=hbase:meta,,1.1588230740} 2024-12-07T12:22:06,147 DEBUG [RS:0;27c6fcd7dac8:46855 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, a3789a04774a73dc51c7f9b2ac34edb6 2024-12-07T12:22:06,147 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T12:22:06,147 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:06,147 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T12:22:06,147 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T12:22:06,147 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:06,147 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T12:22:06,147 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T12:22:06,147 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:06,147 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:06,147 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-12-07T12:22:06,148 ERROR [FSHLog-0-hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3-prefix:27c6fcd7dac8,46855,1733574087478.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:06,148 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:06,148 WARN [FSHLog-0-hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3-prefix:27c6fcd7dac8,46855,1733574087478.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:06,148 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:06,148 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 2024-12-07T12:22:06,148 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 27c6fcd7dac8%2C46855%2C1733574087478.meta:.meta(num 1733574088764) roll requested 2024-12-07T12:22:06,148 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574126148.meta 2024-12-07T12:22:06,148 WARN [IPC Server handler 4 on default port 45085 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 has not been closed. Lease recovery is in progress. 
RecoveryId = 1088 for block blk_1073741837_1013 2024-12-07T12:22:06,149 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 after 1ms 2024-12-07T12:22:06,150 WARN [Thread-1061 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741904_1089 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:06,151 WARN [Thread-1061 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741904_1089 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK], DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]) is bad. 2024-12-07T12:22:06,151 WARN [Thread-1061 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741904_1089 2024-12-07T12:22:06,151 WARN [Thread-1061 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK] 2024-12-07T12:22:06,151 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/d81e6abbfd104ea09100656b6656d480 is 1079, key is tmprow/info:/1733574125939/Put/seqid=0 2024-12-07T12:22:06,152 WARN [Thread-1061 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741905_1090 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:06,152 WARN [Thread-1061 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741905_1090 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK], DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]) is bad. 2024-12-07T12:22:06,152 WARN [Thread-1061 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741905_1090 2024-12-07T12:22:06,153 WARN [Thread-1061 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK] 2024-12-07T12:22:06,153 WARN [Thread-1062 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741906_1091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:06,153 WARN [Thread-1062 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741906_1091 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK], DatanodeInfoWithStorage[127.0.0.1:39611,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]) is bad. 2024-12-07T12:22:06,153 WARN [Thread-1062 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741906_1091 2024-12-07T12:22:06,153 WARN [Thread-1062 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK] 2024-12-07T12:22:06,154 WARN [Thread-1061 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741907_1092 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:06,154 WARN [Thread-1061 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741907_1092 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK], DatanodeInfoWithStorage[127.0.0.1:39611,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]) is bad. 2024-12-07T12:22:06,154 WARN [Thread-1061 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741907_1092 2024-12-07T12:22:06,154 WARN [Thread-1061 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK] 2024-12-07T12:22:06,154 WARN [Thread-1062 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741908_1093 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:06,155 WARN [Thread-1062 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741908_1093 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK], DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]) is bad. 2024-12-07T12:22:06,155 WARN [Thread-1062 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741908_1093 2024-12-07T12:22:06,155 WARN [Thread-1062 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK] 2024-12-07T12:22:06,156 WARN [Thread-1062 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741910_1095 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:06,156 WARN [Thread-1062 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741910_1095 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK], DatanodeInfoWithStorage[127.0.0.1:39611,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]) is bad. 2024-12-07T12:22:06,156 WARN [Thread-1062 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741910_1095 2024-12-07T12:22:06,157 WARN [Thread-1062 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK] 2024-12-07T12:22:06,162 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:06,162 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:06,162 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:06,162 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:06,162 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:06,163 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574126148.meta 2024-12-07T12:22:06,163 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:06,163 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:06,163 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta 2024-12-07T12:22:06,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39611 is added to blk_1073741911_1096 (size=6027) 2024-12-07T12:22:06,164 WARN [IPC Server handler 0 on default port 45085 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta has not been closed. Lease recovery is in progress. RecoveryId = 1097 for block blk_1073741834_1010 2024-12-07T12:22:06,164 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta after 1ms 2024-12-07T12:22:06,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741911_1096 (size=6027) 2024-12-07T12:22:06,164 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=84 (bloomFilter=true), to=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/d81e6abbfd104ea09100656b6656d480 2024-12-07T12:22:06,165 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32927:32927),(127.0.0.1/127.0.0.1:41629:41629)] 2024-12-07T12:22:06,165 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta is not closed yet, will try archiving it next time 2024-12-07T12:22:06,170 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/.tmp/info/d81e6abbfd104ea09100656b6656d480 as hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/d81e6abbfd104ea09100656b6656d480 2024-12-07T12:22:06,175 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/d81e6abbfd104ea09100656b6656d480, entries=1, sequenceid=84, 
filesize=5.9 K 2024-12-07T12:22:06,176 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1075, heapSize ~1.36 KB/1392, currentSize=0 B/0 for a3789a04774a73dc51c7f9b2ac34edb6 in 30ms, sequenceid=84, compaction requested=false 2024-12-07T12:22:06,176 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/621eb98441604c859fe648f3621309a5, hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/4b1faa2dfec64e9794c322bddde7a9c0, hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/c481908d9af2431e921f3faa8df6bb4c, hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/886c55735bcd4e8eba77be9b7d6e65be, hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/fbe2a0a8a6dd49a3a000ff30bac778a7, hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/10e3103fb08844c281da27ad9139378a, hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/68bdbc12302b4306b77a2506cde781e8, hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/090e08c3f3a2434ead0eb38182977de4, hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/72709d89cbf447ff98ea3bbe53adf35a] to archive 2024-12-07T12:22:06,177 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
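The HFileArchiver entries around this point show compacted store files being moved out of the live data directory into a parallel archive/ tree rather than deleted in place. A small sketch of that move-preserving-relative-path step using java.nio.file follows; the directory names mirror the log layout, but these are hypothetical local paths, not the HDFS calls HBase actually makes.

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;
    import java.util.List;

    // Move compacted store files from the live data tree into a parallel archive
    // tree, keeping the same relative path underneath the root, which is the
    // layout the HFileArchiver entries describe.
    public class ArchiveCompactedFilesSketch {
      static void archive(Path dataRoot, Path archiveRoot, List<Path> storeFiles) throws IOException {
        for (Path file : storeFiles) {
          Path relative = dataRoot.relativize(file);
          Path target = archiveRoot.resolve(relative);
          Files.createDirectories(target.getParent());
          Files.move(file, target, StandardCopyOption.REPLACE_EXISTING);
          System.out.println("Archived " + file + " to " + target);
        }
      }

      public static void main(String[] args) throws IOException {
        Path dataRoot = Path.of("data/default/TestLogRolling-testLogRollOnDatanodeDeath");
        Path archiveRoot = Path.of("archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath");
        Path store = dataRoot.resolve("a3789a04774a73dc51c7f9b2ac34edb6/info");
        Files.createDirectories(store);
        Path hfile = Files.createFile(store.resolve("10e3103fb08844c281da27ad9139378a"));
        archive(dataRoot, archiveRoot, List.of(hfile));
      }
    }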
2024-12-07T12:22:06,179 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/621eb98441604c859fe648f3621309a5 to hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/621eb98441604c859fe648f3621309a5 2024-12-07T12:22:06,180 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/4b1faa2dfec64e9794c322bddde7a9c0 to hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/4b1faa2dfec64e9794c322bddde7a9c0 2024-12-07T12:22:06,181 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/c481908d9af2431e921f3faa8df6bb4c to hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/c481908d9af2431e921f3faa8df6bb4c 2024-12-07T12:22:06,182 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/886c55735bcd4e8eba77be9b7d6e65be to hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/886c55735bcd4e8eba77be9b7d6e65be 2024-12-07T12:22:06,183 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/fbe2a0a8a6dd49a3a000ff30bac778a7 to hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/fbe2a0a8a6dd49a3a000ff30bac778a7 2024-12-07T12:22:06,185 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/10e3103fb08844c281da27ad9139378a 
to hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/10e3103fb08844c281da27ad9139378a 2024-12-07T12:22:06,186 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/68bdbc12302b4306b77a2506cde781e8 to hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/68bdbc12302b4306b77a2506cde781e8 2024-12-07T12:22:06,187 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/090e08c3f3a2434ead0eb38182977de4 to hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/090e08c3f3a2434ead0eb38182977de4 2024-12-07T12:22:06,188 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/hbase/meta/1588230740/.tmp/info/fd69b2c506e545dbb54b05647489d3f5 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6./info:regioninfo/1733574089501/Put/seqid=0 2024-12-07T12:22:06,188 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/72709d89cbf447ff98ea3bbe53adf35a to hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/info/72709d89cbf447ff98ea3bbe53adf35a 2024-12-07T12:22:06,188 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=27c6fcd7dac8:43989 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-12-07T12:22:06,189 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [621eb98441604c859fe648f3621309a5=10347, 4b1faa2dfec64e9794c322bddde7a9c0=12506, c481908d9af2431e921f3faa8df6bb4c=17994, 886c55735bcd4e8eba77be9b7d6e65be=6027, fbe2a0a8a6dd49a3a000ff30bac778a7=6027, 10e3103fb08844c281da27ad9139378a=18097, 68bdbc12302b4306b77a2506cde781e8=6027, 090e08c3f3a2434ead0eb38182977de4=8190, 72709d89cbf447ff98ea3bbe53adf35a=14660] 2024-12-07T12:22:06,190 WARN [Thread-1073 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741912_1098 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38045 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:06,190 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:58624 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741912_1098] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data8]'}, localName='127.0.0.1:39315', datanodeUuid='a88a03a9-2854-41f9-b710-2efe6d2391d7', xmitsInProgress=0}:Exception transferring block BP-1950294605-172.17.0.2-1733574086387:blk_1073741912_1098 to mirror 127.0.0.1:38045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T12:22:06,191 WARN [Thread-1073 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741912_1098 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK], DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]) is bad. 2024-12-07T12:22:06,191 WARN [Thread-1073 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741912_1098 2024-12-07T12:22:06,191 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:58624 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741912_1098] {}] datanode.BlockReceiver(316): Block 1073741912 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T12:22:06,191 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:58624 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741912_1098] {}] datanode.DataXceiver(331): 127.0.0.1:39315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58624 dst: /127.0.0.1:39315 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:06,191 WARN [Thread-1073 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK] 2024-12-07T12:22:06,192 WARN [Thread-1073 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741913_1099 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:22:06,193 WARN [Thread-1073 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741913_1099 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK], DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]) is bad. 2024-12-07T12:22:06,193 WARN [Thread-1073 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741913_1099 2024-12-07T12:22:06,193 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a3789a04774a73dc51c7f9b2ac34edb6/recovered.edits/87.seqid, newMaxSeqId=87, maxSeqId=1 2024-12-07T12:22:06,193 WARN [Thread-1073 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK] 2024-12-07T12:22:06,194 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6. 2024-12-07T12:22:06,194 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a3789a04774a73dc51c7f9b2ac34edb6: Waiting for close lock at 1733574126146Running coprocessor pre-close hooks at 1733574126146Disabling compacts and flushes for region at 1733574126146Disabling writes for close at 1733574126146Obtaining lock to block concurrent updates at 1733574126146Preparing flush snapshotting stores in a3789a04774a73dc51c7f9b2ac34edb6 at 1733574126146Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6., syncing WAL and waiting on mvcc, flushsize=dataSize=1075, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1733574126147 (+1 ms)Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6. at 1733574126148 (+1 ms)Flushing a3789a04774a73dc51c7f9b2ac34edb6/info: creating writer at 1733574126148Flushing a3789a04774a73dc51c7f9b2ac34edb6/info: appending metadata at 1733574126151 (+3 ms)Flushing a3789a04774a73dc51c7f9b2ac34edb6/info: closing flushed file at 1733574126151Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b8408f2: reopening flushed file at 1733574126170 (+19 ms)Finished flush of dataSize ~1.05 KB/1075, heapSize ~1.36 KB/1392, currentSize=0 B/0 for a3789a04774a73dc51c7f9b2ac34edb6 in 30ms, sequenceid=84, compaction requested=false at 1733574126176 (+6 ms)Writing region close event to WAL at 1733574126189 (+13 ms)Running coprocessor post-close hooks at 1733574126194 (+5 ms)Closed at 1733574126194 2024-12-07T12:22:06,194 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733574089097.a3789a04774a73dc51c7f9b2ac34edb6. 2024-12-07T12:22:06,194 WARN [Thread-1073 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741914_1100 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:06,194 WARN [Thread-1073 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741914_1100 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK], DatanodeInfoWithStorage[127.0.0.1:39611,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]) is bad. 2024-12-07T12:22:06,194 WARN [Thread-1073 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741914_1100 2024-12-07T12:22:06,195 WARN [Thread-1073 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK] 2024-12-07T12:22:06,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39611 is added to blk_1073741915_1101 (size=7089) 2024-12-07T12:22:06,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741915_1101 (size=7089) 2024-12-07T12:22:06,199 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/hbase/meta/1588230740/.tmp/info/fd69b2c506e545dbb54b05647489d3f5 2024-12-07T12:22:06,220 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/hbase/meta/1588230740/.tmp/ns/74851f32b05848d88e1e2e4e85f609ce is 43, key is default/ns:d/1733574088823/Put/seqid=0 2024-12-07T12:22:06,221 WARN [Thread-1081 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741916_1102 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:06,221 WARN [Thread-1081 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741916_1102 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK], DatanodeInfoWithStorage[127.0.0.1:39611,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]) is bad. 2024-12-07T12:22:06,221 WARN [Thread-1081 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741916_1102 2024-12-07T12:22:06,222 WARN [Thread-1081 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK] 2024-12-07T12:22:06,223 WARN [Thread-1081 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741917_1103 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:06,223 WARN [Thread-1081 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741917_1103 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK], DatanodeInfoWithStorage[127.0.0.1:39611,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]) is bad. 2024-12-07T12:22:06,223 WARN [Thread-1081 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741917_1103 2024-12-07T12:22:06,223 WARN [Thread-1081 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK] 2024-12-07T12:22:06,225 WARN [Thread-1081 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741918_1104 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38045 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:22:06,225 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:58642 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741918_1104] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data8]'}, localName='127.0.0.1:39315', datanodeUuid='a88a03a9-2854-41f9-b710-2efe6d2391d7', xmitsInProgress=0}:Exception transferring block BP-1950294605-172.17.0.2-1733574086387:blk_1073741918_1104 to mirror 127.0.0.1:38045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:06,225 WARN [Thread-1081 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741918_1104 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK], DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]) is bad. 2024-12-07T12:22:06,225 WARN [Thread-1081 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741918_1104 2024-12-07T12:22:06,225 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:58642 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741918_1104] {}] datanode.BlockReceiver(316): Block 1073741918 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T12:22:06,226 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:58642 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741918_1104] {}] datanode.DataXceiver(331): 127.0.0.1:39315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58642 dst: /127.0.0.1:39315 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:06,226 WARN [Thread-1081 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK] 2024-12-07T12:22:06,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741919_1105 (size=5153) 2024-12-07T12:22:06,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39611 is added to blk_1073741919_1105 (size=5153) 2024-12-07T12:22:06,230 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/hbase/meta/1588230740/.tmp/ns/74851f32b05848d88e1e2e4e85f609ce 2024-12-07T12:22:06,250 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/hbase/meta/1588230740/.tmp/table/d6792370ab7b4734adb92c196e09cd29 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1733574089513/Put/seqid=0 2024-12-07T12:22:06,251 WARN [Thread-1088 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741920_1106 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:06,251 WARN [Thread-1088 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741920_1106 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK], DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK]) is bad. 
2024-12-07T12:22:06,251 WARN [Thread-1088 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741920_1106 2024-12-07T12:22:06,252 WARN [Thread-1088 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34911,DS-0ee68ff9-3a86-4043-89c8-9dc291195038,DISK] 2024-12-07T12:22:06,254 WARN [Thread-1088 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741921_1107 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36957 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:06,253 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:58662 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741921_1107] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data8]'}, localName='127.0.0.1:39315', datanodeUuid='a88a03a9-2854-41f9-b710-2efe6d2391d7', xmitsInProgress=0}:Exception transferring block BP-1950294605-172.17.0.2-1733574086387:blk_1073741921_1107 to mirror 127.0.0.1:36957 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:06,254 WARN [Thread-1088 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741921_1107 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39315,DS-98ba4bde-249b-4bad-8a38-d87428fa47ee,DISK], DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK]) is bad. 
2024-12-07T12:22:06,254 WARN [Thread-1088 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741921_1107 2024-12-07T12:22:06,254 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:58662 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741921_1107] {}] datanode.BlockReceiver(316): Block 1073741921 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T12:22:06,254 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1448702490_22 at /127.0.0.1:58662 [Receiving block BP-1950294605-172.17.0.2-1733574086387:blk_1073741921_1107] {}] datanode.DataXceiver(331): 127.0.0.1:39315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58662 dst: /127.0.0.1:39315 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:06,254 WARN [Thread-1088 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36957,DS-b1d80ca4-6c94-45cf-963b-160ca3a22b1e,DISK] 2024-12-07T12:22:06,255 WARN [Thread-1088 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741922_1108 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:06,255 WARN [Thread-1088 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1950294605-172.17.0.2-1733574086387:blk_1073741922_1108 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK], DatanodeInfoWithStorage[127.0.0.1:39611,DS-bfcb3ebb-d23a-4fff-9bd8-d3dba709f5cb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK]) is bad. 
2024-12-07T12:22:06,255 WARN [Thread-1088 {}] hdfs.DataStreamer(1850): Abandoning BP-1950294605-172.17.0.2-1733574086387:blk_1073741922_1108 2024-12-07T12:22:06,256 WARN [Thread-1088 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38045,DS-e767951a-f65f-4302-8516-77a19232ec3c,DISK] 2024-12-07T12:22:06,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741923_1109 (size=5424) 2024-12-07T12:22:06,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39611 is added to blk_1073741923_1109 (size=5424) 2024-12-07T12:22:06,261 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/hbase/meta/1588230740/.tmp/table/d6792370ab7b4734adb92c196e09cd29 2024-12-07T12:22:06,266 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/hbase/meta/1588230740/.tmp/info/fd69b2c506e545dbb54b05647489d3f5 as hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/hbase/meta/1588230740/info/fd69b2c506e545dbb54b05647489d3f5 2024-12-07T12:22:06,271 INFO [regionserver/27c6fcd7dac8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-07T12:22:06,271 INFO [regionserver/27c6fcd7dac8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T12:22:06,271 INFO [regionserver/27c6fcd7dac8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-07T12:22:06,272 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/hbase/meta/1588230740/info/fd69b2c506e545dbb54b05647489d3f5, entries=10, sequenceid=11, filesize=6.9 K 2024-12-07T12:22:06,273 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/hbase/meta/1588230740/.tmp/ns/74851f32b05848d88e1e2e4e85f609ce as hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/hbase/meta/1588230740/ns/74851f32b05848d88e1e2e4e85f609ce 2024-12-07T12:22:06,278 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/hbase/meta/1588230740/ns/74851f32b05848d88e1e2e4e85f609ce, entries=2, sequenceid=11, filesize=5.0 K 2024-12-07T12:22:06,279 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/hbase/meta/1588230740/.tmp/table/d6792370ab7b4734adb92c196e09cd29 as hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/hbase/meta/1588230740/table/d6792370ab7b4734adb92c196e09cd29 2024-12-07T12:22:06,285 INFO 
[RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/hbase/meta/1588230740/table/d6792370ab7b4734adb92c196e09cd29, entries=2, sequenceid=11, filesize=5.3 K 2024-12-07T12:22:06,286 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 139ms, sequenceid=11, compaction requested=false 2024-12-07T12:22:06,291 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-07T12:22:06,291 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T12:22:06,291 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T12:22:06,292 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733574126147Running coprocessor pre-close hooks at 1733574126147Disabling compacts and flushes for region at 1733574126147Disabling writes for close at 1733574126147Obtaining lock to block concurrent updates at 1733574126147Preparing flush snapshotting stores in 1588230740 at 1733574126147Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1733574126148 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733574126165 (+17 ms)Flushing 1588230740/info: creating writer at 1733574126165Flushing 1588230740/info: appending metadata at 1733574126187 (+22 ms)Flushing 1588230740/info: closing flushed file at 1733574126187Flushing 1588230740/ns: creating writer at 1733574126204 (+17 ms)Flushing 1588230740/ns: appending metadata at 1733574126219 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1733574126219Flushing 1588230740/table: creating writer at 1733574126236 (+17 ms)Flushing 1588230740/table: appending metadata at 1733574126249 (+13 ms)Flushing 1588230740/table: closing flushed file at 1733574126249Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b4cf048: reopening flushed file at 1733574126265 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@627cc927: reopening flushed file at 1733574126272 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@576c32b6: reopening flushed file at 1733574126279 (+7 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 139ms, sequenceid=11, compaction requested=false at 1733574126286 (+7 ms)Writing region close event to WAL at 1733574126287 (+1 ms)Running coprocessor post-close hooks at 1733574126291 (+4 ms)Closed at 1733574126291 2024-12-07T12:22:06,292 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T12:22:06,347 INFO [RS:0;27c6fcd7dac8:46855 {}] 
regionserver.HRegionServer(976): stopping server 27c6fcd7dac8,46855,1733574087478; all regions closed. 2024-12-07T12:22:06,347 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:06,347 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:06,348 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:06,348 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:06,348 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:06,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741909_1094 (size=825) 2024-12-07T12:22:06,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39611 is added to blk_1073741909_1094 (size=825) 2024-12-07T12:22:06,645 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6b564788[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39315, datanodeUuid=a88a03a9-2854-41f9-b710-2efe6d2391d7, infoPort=41629, infoSecurePort=0, ipcPort=37835, storageInfo=lv=-57;cid=testClusterID;nsid=2019329852;c=1733574086387):Failed to transfer BP-1950294605-172.17.0.2-1733574086387:blk_1073741878_1061 to 127.0.0.1:36957 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:07,060 INFO [regionserver/27c6fcd7dac8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-07T12:22:07,060 INFO [regionserver/27c6fcd7dac8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-07T12:22:07,061 INFO [regionserver/27c6fcd7dac8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T12:22:08,276 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-07T12:22:08,277 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T12:22:08,277 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-07T12:22:08,906 INFO [master/27c6fcd7dac8:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-07T12:22:08,906 INFO [master/27c6fcd7dac8:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-07T12:22:09,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741831_1007 (size=1321) 2024-12-07T12:22:09,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741835_1011 (size=393) 2024-12-07T12:22:10,150 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 after 4002ms 2024-12-07T12:22:10,165 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta after 4002ms 2024-12-07T12:22:10,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741829_1005 (size=34) 2024-12-07T12:22:10,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741827_1003 (size=196) 2024-12-07T12:22:11,148 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-07T12:22:11,150 DEBUG [RS:1;27c6fcd7dac8:37667 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/oldWALs 2024-12-07T12:22:11,150 INFO [RS:1;27c6fcd7dac8:37667 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 27c6fcd7dac8%2C37667%2C1733574088962:(num 1733574089197) 2024-12-07T12:22:11,150 DEBUG [RS:1;27c6fcd7dac8:37667 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:22:11,150 INFO [RS:1;27c6fcd7dac8:37667 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T12:22:11,150 INFO [RS:1;27c6fcd7dac8:37667 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T12:22:11,150 INFO [RS:1;27c6fcd7dac8:37667 {}] hbase.ChoreService(370): Chore service for: regionserver/27c6fcd7dac8:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T12:22:11,150 INFO [RS:1;27c6fcd7dac8:37667 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T12:22:11,150 INFO [RS:1;27c6fcd7dac8:37667 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T12:22:11,150 INFO [RS:1;27c6fcd7dac8:37667 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T12:22:11,151 INFO [RS:1;27c6fcd7dac8:37667 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T12:22:11,151 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T12:22:11,151 INFO [RS:1;27c6fcd7dac8:37667 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37667 2024-12-07T12:22:11,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T12:22:11,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37667-0x1018cdf3d480002, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/27c6fcd7dac8,37667,1733574088962 2024-12-07T12:22:11,153 INFO [RS:1;27c6fcd7dac8:37667 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T12:22:11,153 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [27c6fcd7dac8,37667,1733574088962] 2024-12-07T12:22:11,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:11,157 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/27c6fcd7dac8,37667,1733574088962 already deleted, retry=false 2024-12-07T12:22:11,157 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 27c6fcd7dac8,37667,1733574088962 expired; onlineServers=1 2024-12-07T12:22:11,195 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:11,208 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:11,209 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:11,209 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:11,209 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:11,210 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:11,217 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:11,219 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:11,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37667-0x1018cdf3d480002, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:22:11,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37667-0x1018cdf3d480002, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:22:11,254 INFO [RS:1;27c6fcd7dac8:37667 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T12:22:11,254 INFO [RS:1;27c6fcd7dac8:37667 {}] regionserver.HRegionServer(1031): Exiting; stopping=27c6fcd7dac8,37667,1733574088962; zookeeper connection closed. 2024-12-07T12:22:11,254 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@58e63d5e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@58e63d5e 2024-12-07T12:22:11,348 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-07T12:22:11,351 DEBUG [RS:0;27c6fcd7dac8:46855 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/oldWALs 2024-12-07T12:22:11,351 INFO [RS:0;27c6fcd7dac8:46855 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 27c6fcd7dac8%2C46855%2C1733574087478.meta:.meta(num 1733574126148) 2024-12-07T12:22:11,352 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:11,352 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:11,352 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:11,352 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:11,352 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:11,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741895_1079 (size=18156) 2024-12-07T12:22:11,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39611 is added to blk_1073741895_1079 (size=18156) 2024-12-07T12:22:11,356 DEBUG [RS:0;27c6fcd7dac8:46855 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/oldWALs 2024-12-07T12:22:11,356 INFO [RS:0;27c6fcd7dac8:46855 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 27c6fcd7dac8%2C46855%2C1733574087478:(num 1733574125501) 2024-12-07T12:22:11,357 DEBUG [RS:0;27c6fcd7dac8:46855 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:22:11,357 INFO [RS:0;27c6fcd7dac8:46855 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T12:22:11,357 INFO [RS:0;27c6fcd7dac8:46855 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T12:22:11,357 INFO [RS:0;27c6fcd7dac8:46855 {}] hbase.ChoreService(370): Chore service for: regionserver/27c6fcd7dac8:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-07T12:22:11,357 INFO [RS:0;27c6fcd7dac8:46855 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T12:22:11,357 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T12:22:11,357 INFO [RS:0;27c6fcd7dac8:46855 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46855 2024-12-07T12:22:11,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46855-0x1018cdf3d480001, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/27c6fcd7dac8,46855,1733574087478 2024-12-07T12:22:11,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T12:22:11,359 INFO [RS:0;27c6fcd7dac8:46855 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T12:22:11,360 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [27c6fcd7dac8,46855,1733574087478] 2024-12-07T12:22:11,361 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/27c6fcd7dac8,46855,1733574087478 already deleted, retry=false 2024-12-07T12:22:11,361 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 27c6fcd7dac8,46855,1733574087478 expired; onlineServers=0 2024-12-07T12:22:11,361 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '27c6fcd7dac8,43989,1733574087391' ***** 2024-12-07T12:22:11,361 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-07T12:22:11,361 INFO [M:0;27c6fcd7dac8:43989 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T12:22:11,361 INFO [M:0;27c6fcd7dac8:43989 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T12:22:11,361 DEBUG [M:0;27c6fcd7dac8:43989 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-07T12:22:11,362 DEBUG [M:0;27c6fcd7dac8:43989 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-07T12:22:11,362 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
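
The AbstractFSWAL(2118) ERROR above reports that the shutdown path waited 5 seconds for the async WAL writer to close and then gave up, and it names the setting that controls the wait, "hbase.wal.fshlog.wait.on.shutdown.seconds". Below is a minimal sketch, assuming a hypothetical standalone helper that is not part of the test shown in this log, of how that wait could be raised before the cluster is started; the 15-second value is only an illustrative assumption.

    // Minimal sketch; only the property name comes from the log message above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class WalShutdownWaitExample {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // The error message above implies a 5-second default; 15 is an arbitrary example value.
        conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 15);
        return conf;
      }
    }

A longer wait only helps if the underlying filesystem is still reachable; later in this log the client-side filesystem is already closed, so no wait would let the writer close cleanly.
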
2024-12-07T12:22:11,362 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.small.0-1733574088144 {}] cleaner.HFileCleaner(306): Exit Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.small.0-1733574088144,5,FailOnTimeoutGroup] 2024-12-07T12:22:11,362 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.large.0-1733574088142 {}] cleaner.HFileCleaner(306): Exit Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.large.0-1733574088142,5,FailOnTimeoutGroup] 2024-12-07T12:22:11,362 INFO [M:0;27c6fcd7dac8:43989 {}] hbase.ChoreService(370): Chore service for: master/27c6fcd7dac8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-07T12:22:11,362 INFO [M:0;27c6fcd7dac8:43989 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T12:22:11,362 DEBUG [M:0;27c6fcd7dac8:43989 {}] master.HMaster(1795): Stopping service threads 2024-12-07T12:22:11,362 INFO [M:0;27c6fcd7dac8:43989 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-07T12:22:11,362 INFO [M:0;27c6fcd7dac8:43989 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T12:22:11,362 INFO [M:0;27c6fcd7dac8:43989 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-07T12:22:11,362 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-07T12:22:11,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T12:22:11,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:11,365 DEBUG [M:0;27c6fcd7dac8:43989 {}] zookeeper.ZKUtil(347): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T12:22:11,365 WARN [M:0;27c6fcd7dac8:43989 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T12:22:11,365 INFO [M:0;27c6fcd7dac8:43989 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/.lastflushedseqids 2024-12-07T12:22:11,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741924_1110 (size=130) 2024-12-07T12:22:11,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39611 is added to blk_1073741924_1110 (size=130) 2024-12-07T12:22:11,371 INFO [M:0;27c6fcd7dac8:43989 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-07T12:22:11,371 INFO [M:0;27c6fcd7dac8:43989 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-07T12:22:11,371 DEBUG [M:0;27c6fcd7dac8:43989 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T12:22:11,371 INFO [M:0;27c6fcd7dac8:43989 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:22:11,371 DEBUG [M:0;27c6fcd7dac8:43989 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:22:11,371 DEBUG [M:0;27c6fcd7dac8:43989 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T12:22:11,371 DEBUG [M:0;27c6fcd7dac8:43989 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:22:11,372 INFO [M:0;27c6fcd7dac8:43989 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-12-07T12:22:11,395 DEBUG [M:0;27c6fcd7dac8:43989 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/09615a5ce24c4c4aa93ee10bc28ed208 is 82, key is hbase:meta,,1/info:regioninfo/1733574088800/Put/seqid=0 2024-12-07T12:22:11,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39611 is added to blk_1073741925_1111 (size=5672) 2024-12-07T12:22:11,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741925_1111 (size=5672) 2024-12-07T12:22:11,400 INFO [M:0;27c6fcd7dac8:43989 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/09615a5ce24c4c4aa93ee10bc28ed208 2024-12-07T12:22:11,419 DEBUG [M:0;27c6fcd7dac8:43989 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/43a565edbe96484399a66e63212e1b0c is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733574089518/Put/seqid=0 2024-12-07T12:22:11,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39611 is added to blk_1073741926_1112 (size=6255) 2024-12-07T12:22:11,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741926_1112 (size=6255) 2024-12-07T12:22:11,424 INFO [M:0;27c6fcd7dac8:43989 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/43a565edbe96484399a66e63212e1b0c 2024-12-07T12:22:11,428 INFO [M:0;27c6fcd7dac8:43989 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 43a565edbe96484399a66e63212e1b0c 2024-12-07T12:22:11,443 DEBUG [M:0;27c6fcd7dac8:43989 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/24b07e216d054b409d39abc73bb7b27a is 69, key is 27c6fcd7dac8,37667,1733574088962/rs:state/1733574089044/Put/seqid=0 2024-12-07T12:22:11,447 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741927_1113 (size=5224) 2024-12-07T12:22:11,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39611 is added to blk_1073741927_1113 (size=5224) 2024-12-07T12:22:11,448 INFO [M:0;27c6fcd7dac8:43989 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/24b07e216d054b409d39abc73bb7b27a 2024-12-07T12:22:11,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46855-0x1018cdf3d480001, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:22:11,460 INFO [RS:0;27c6fcd7dac8:46855 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T12:22:11,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46855-0x1018cdf3d480001, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:22:11,460 INFO [RS:0;27c6fcd7dac8:46855 {}] regionserver.HRegionServer(1031): Exiting; stopping=27c6fcd7dac8,46855,1733574087478; zookeeper connection closed. 2024-12-07T12:22:11,461 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3e0ca73d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3e0ca73d 2024-12-07T12:22:11,461 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-12-07T12:22:11,467 DEBUG [M:0;27c6fcd7dac8:43989 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/095db35d7b74498cbd208372641bba8c is 52, key is load_balancer_on/state:d/1733574088935/Put/seqid=0 2024-12-07T12:22:11,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741928_1114 (size=5056) 2024-12-07T12:22:11,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39611 is added to blk_1073741928_1114 (size=5056) 2024-12-07T12:22:11,472 INFO [M:0;27c6fcd7dac8:43989 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/095db35d7b74498cbd208372641bba8c 2024-12-07T12:22:11,478 DEBUG [M:0;27c6fcd7dac8:43989 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/09615a5ce24c4c4aa93ee10bc28ed208 as hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/09615a5ce24c4c4aa93ee10bc28ed208 2024-12-07T12:22:11,482 INFO [M:0;27c6fcd7dac8:43989 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/09615a5ce24c4c4aa93ee10bc28ed208, entries=8, sequenceid=60, filesize=5.5 K 2024-12-07T12:22:11,483 DEBUG [M:0;27c6fcd7dac8:43989 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/43a565edbe96484399a66e63212e1b0c as hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/43a565edbe96484399a66e63212e1b0c 2024-12-07T12:22:11,487 INFO [M:0;27c6fcd7dac8:43989 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 43a565edbe96484399a66e63212e1b0c 2024-12-07T12:22:11,487 INFO [M:0;27c6fcd7dac8:43989 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/43a565edbe96484399a66e63212e1b0c, entries=6, sequenceid=60, filesize=6.1 K 2024-12-07T12:22:11,488 DEBUG [M:0;27c6fcd7dac8:43989 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/24b07e216d054b409d39abc73bb7b27a as hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/24b07e216d054b409d39abc73bb7b27a 2024-12-07T12:22:11,493 INFO [M:0;27c6fcd7dac8:43989 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/24b07e216d054b409d39abc73bb7b27a, entries=2, sequenceid=60, filesize=5.1 K 2024-12-07T12:22:11,494 DEBUG [M:0;27c6fcd7dac8:43989 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/095db35d7b74498cbd208372641bba8c as hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/095db35d7b74498cbd208372641bba8c 2024-12-07T12:22:11,498 INFO [M:0;27c6fcd7dac8:43989 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/095db35d7b74498cbd208372641bba8c, entries=1, sequenceid=60, filesize=4.9 K 2024-12-07T12:22:11,499 INFO [M:0;27c6fcd7dac8:43989 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=60, compaction requested=false 2024-12-07T12:22:11,500 INFO [M:0;27c6fcd7dac8:43989 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
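
The flush above writes each column family of the master's local store to a file under the region's .tmp directory and then commits it into the family directory (the HRegionFileSystem(442) "Committing ... as ..." lines). A minimal sketch of that commit step follows, assuming it boils down to a filesystem move; the class name, method, and paths are illustrative, and the real code performs extra validation that is omitted here.

    // Minimal sketch of the ".tmp -> family" commit seen in the HRegionFileSystem(442) lines above.
    // Class and method names are hypothetical; only the directory layout is taken from the log paths.
    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class CommitStoreFileExample {
      public static Path commit(Configuration conf, Path regionDir, String family, String hfileName)
          throws IOException {
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path(regionDir, ".tmp/" + family + "/" + hfileName);
        Path dst = new Path(regionDir, family + "/" + hfileName);
        // Assumed to be essentially a rename within the same filesystem, as the log's
        // "Committing <tmp> as <final>" wording suggests.
        if (!fs.rename(tmp, dst)) {
          throw new IOException("Failed to commit " + tmp + " as " + dst);
        }
        return dst;
      }
    }
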
2024-12-07T12:22:11,500 DEBUG [M:0;27c6fcd7dac8:43989 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733574131371Disabling compacts and flushes for region at 1733574131371Disabling writes for close at 1733574131371Obtaining lock to block concurrent updates at 1733574131372 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733574131372Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1733574131372Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733574131373 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733574131373Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733574131394 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733574131394Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733574131404 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733574131418 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733574131418Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733574131429 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733574131442 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733574131442Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733574131453 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733574131467 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733574131467Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45e47410: reopening flushed file at 1733574131477 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@31d9327f: reopening flushed file at 1733574131482 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34b63a5f: reopening flushed file at 1733574131487 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34c8c8ff: reopening flushed file at 1733574131493 (+6 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=60, compaction requested=false at 1733574131499 (+6 ms)Writing region close event to WAL at 1733574131500 (+1 ms)Closed at 1733574131500 2024-12-07T12:22:11,501 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:11,501 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:11,501 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:11,501 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:11,501 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:11,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39611 is added to blk_1073741892_1075 (size=1045) 2024-12-07T12:22:11,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741892_1075 (size=1045) 2024-12-07T12:22:11,721 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-07T12:22:11,735 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:11,736 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:11,736 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:11,736 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:11,737 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:11,737 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:11,740 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:11,742 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:12,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:12,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:22:12,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741836_1012 (size=76) 2024-12-07T12:22:12,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39315 is added to blk_1073741825_1001 (size=7) 2024-12-07T12:22:12,319 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3bcdf559 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1950294605-172.17.0.2-1733574086387:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:38045,null,null]) java.net.ConnectException: Call From 27c6fcd7dac8/172.17.0.2 to localhost:35217 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-07T12:22:13,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:22:13,164 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/WALs/27c6fcd7dac8,43989,1733574087391/27c6fcd7dac8%2C43989%2C1733574087391.1733574088058 to hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/oldWALs/27c6fcd7dac8%2C43989%2C1733574087391.1733574088058 2024-12-07T12:22:13,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:13,167 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/MasterData/oldWALs/27c6fcd7dac8%2C43989%2C1733574087391.1733574088058 to hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/oldWALs/27c6fcd7dac8%2C43989%2C1733574087391.1733574088058$masterlocalwal$ 2024-12-07T12:22:13,167 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T12:22:13,167 INFO [M:0;27c6fcd7dac8:43989 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
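
The RecoverLeaseFSUtils(258) warnings above come from the WAL close path asking HDFS to recover the lease on the old WAL and then polling whether the NameNode considers the file closed; the stack traces show the poll going through DistributedFileSystem.isFileClosed (invoked via reflection in the real code). In this log the check itself fails, first with FileNotFoundException because the path no longer exists under WALs/, and later with "Filesystem closed" once the client-side filesystem has been shut down during teardown. Below is a minimal sketch of that recover-then-poll pattern against the plain HDFS client API; the timeout, poll interval, and method name are assumptions.

    // Minimal sketch of lease recovery followed by an isFileClosed poll, as in the warnings above.
    // Assumes fs.defaultFS points at HDFS; the 1-second interval and the timeout are illustrative.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class WalLeaseRecoveryExample {
      public static boolean recover(Configuration conf, Path wal, long timeoutMs) throws Exception {
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        long deadline = System.currentTimeMillis() + timeoutMs;
        boolean recovered = dfs.recoverLease(wal);   // ask the NameNode to begin lease recovery
        while (!recovered && System.currentTimeMillis() < deadline) {
          if (dfs.isFileClosed(wal)) {               // the call that throws in the log above
            return true;
          }
          Thread.sleep(1000L);
          recovered = dfs.recoverLease(wal);
        }
        return recovered;
      }
    }
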
2024-12-07T12:22:13,167 INFO [M:0;27c6fcd7dac8:43989 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43989 2024-12-07T12:22:13,168 INFO [M:0;27c6fcd7dac8:43989 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T12:22:13,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:22:13,270 INFO [M:0;27c6fcd7dac8:43989 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T12:22:13,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43989-0x1018cdf3d480000, quorum=127.0.0.1:62922, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:22:13,272 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@39eaf0e6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:22:13,272 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1542e930{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:22:13,272 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:22:13,272 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7177a9b6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:22:13,273 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61815e22{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/hadoop.log.dir/,STOPPED} 2024-12-07T12:22:13,274 WARN [BP-1950294605-172.17.0.2-1733574086387 heartbeating to localhost/127.0.0.1:45085 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T12:22:13,274 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-07T12:22:13,274 WARN [BP-1950294605-172.17.0.2-1733574086387 heartbeating to localhost/127.0.0.1:45085 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1950294605-172.17.0.2-1733574086387 (Datanode Uuid 92cd3207-d7db-45e7-bcec-7240b42e4324) service to localhost/127.0.0.1:45085 2024-12-07T12:22:13,274 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T12:22:13,274 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@428b9362 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1950294605-172.17.0.2-1733574086387:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:38045,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:35217 , LocalHost:localPort 27c6fcd7dac8/172.17.0.2:0. 
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-07T12:22:13,275 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@428b9362 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1950294605-172.17.0.2-1733574086387:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:39611,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1950294605-172.17.0.2-1733574086387 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:13,275 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data3/current/BP-1950294605-172.17.0.2-1733574086387 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:22:13,275 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@428b9362 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1950294605-172.17.0.2-1733574086387:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:38045,null,null], DatanodeInfoWithStorage[127.0.0.1:39611,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1950294605-172.17.0.2-1733574086387:blk_1073741837_1013, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:38045,null,null], DatanodeInfoWithStorage[127.0.0.1:39611,null,null]] 2024-12-07T12:22:13,275 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data4/current/BP-1950294605-172.17.0.2-1733574086387 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:22:13,275 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@428b9362 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1950294605-172.17.0.2-1733574086387:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:39611,null,null]) java.io.IOException: No block pool offer service for 
bpid=BP-1950294605-172.17.0.2-1733574086387 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:13,275 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T12:22:13,275 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@428b9362 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1950294605-172.17.0.2-1733574086387:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:38045,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1950294605-172.17.0.2-1733574086387 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:13,275 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@428b9362 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1950294605-172.17.0.2-1733574086387:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:39611,null,null], DatanodeInfoWithStorage[127.0.0.1:38045,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1950294605-172.17.0.2-1733574086387:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:39611,null,null], DatanodeInfoWithStorage[127.0.0.1:38045,null,null]] 2024-12-07T12:22:13,278 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b4117c9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:22:13,278 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4ccc1bc4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:22:13,278 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:22:13,278 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ca82099{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:22:13,278 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@156f3a55{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/hadoop.log.dir/,STOPPED} 2024-12-07T12:22:13,279 WARN [BP-1950294605-172.17.0.2-1733574086387 heartbeating to localhost/127.0.0.1:45085 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T12:22:13,279 WARN [BP-1950294605-172.17.0.2-1733574086387 heartbeating to localhost/127.0.0.1:45085 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1950294605-172.17.0.2-1733574086387 (Datanode Uuid a88a03a9-2854-41f9-b710-2efe6d2391d7) service to localhost/127.0.0.1:45085 2024-12-07T12:22:13,279 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-07T12:22:13,279 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T12:22:13,280 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data7/current/BP-1950294605-172.17.0.2-1733574086387 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:22:13,280 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/cluster_fdf22117-0357-3157-d242-7482532bd264/data/data8/current/BP-1950294605-172.17.0.2-1733574086387 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:22:13,281 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T12:22:13,287 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@35d13a28{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T12:22:13,287 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1d790455{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:22:13,287 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:22:13,287 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@f1f9cf1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:22:13,288 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24befc55{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/hadoop.log.dir/,STOPPED} 2024-12-07T12:22:13,295 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-07T12:22:13,323 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-07T12:22:13,334 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: 
regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=155 (was 80) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45085 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:45085 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45085 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45085 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45085 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45085 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:45085 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45085 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45085 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36683 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007fbd80befdc8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:36683 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45085 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007fbd80befdc8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45085 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=105 (was 129), ProcessCount=11 (was 11), AvailableMemoryMB=6367 (was 6311) - AvailableMemoryMB LEAK? - 2024-12-07T12:22:13,341 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=155, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=105, ProcessCount=11, AvailableMemoryMB=6367 2024-12-07T12:22:13,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-07T12:22:13,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/hadoop.log.dir so I do NOT create it in target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31 2024-12-07T12:22:13,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6d6d0711-1c1d-d61d-523e-ae7fee9851b6/hadoop.tmp.dir so I do NOT create it in target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31 2024-12-07T12:22:13,342 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/cluster_2568f704-5c37-f461-1921-6023ff8e1bdd, deleteOnExit=true 2024-12-07T12:22:13,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-07T12:22:13,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/test.cache.data in system properties and HBase conf 2024-12-07T12:22:13,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T12:22:13,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/hadoop.log.dir in system properties and HBase conf 2024-12-07T12:22:13,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T12:22:13,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T12:22:13,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-07T12:22:13,342 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-07T12:22:13,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T12:22:13,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T12:22:13,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T12:22:13,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T12:22:13,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T12:22:13,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T12:22:13,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T12:22:13,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T12:22:13,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T12:22:13,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/nfs.dump.dir in system properties and HBase conf 2024-12-07T12:22:13,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/java.io.tmpdir in system properties and HBase conf 2024-12-07T12:22:13,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T12:22:13,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T12:22:13,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T12:22:13,357 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-07T12:22:13,437 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:22:13,441 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:22:13,442 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:22:13,442 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:22:13,443 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T12:22:13,443 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:22:13,444 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45628471{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:22:13,444 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3bf7054a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:22:13,557 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@71718145{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/java.io.tmpdir/jetty-localhost-34683-hadoop-hdfs-3_4_1-tests_jar-_-any-18219641010050666438/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T12:22:13,558 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@37ba1ac4{HTTP/1.1, (http/1.1)}{localhost:34683} 2024-12-07T12:22:13,558 INFO [Time-limited test {}] server.Server(415): Started @150976ms 2024-12-07T12:22:13,571 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-07T12:22:13,645 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:22:13,649 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:22:13,651 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:22:13,651 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:22:13,651 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T12:22:13,653 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30a928dc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:22:13,653 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@719d6bc4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:22:13,768 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@16178224{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/java.io.tmpdir/jetty-localhost-34093-hadoop-hdfs-3_4_1-tests_jar-_-any-15001600258397586335/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:22:13,768 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@34c1099f{HTTP/1.1, (http/1.1)}{localhost:34093} 2024-12-07T12:22:13,769 INFO [Time-limited test {}] server.Server(415): Started @151186ms 2024-12-07T12:22:13,770 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T12:22:13,798 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:22:13,801 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:22:13,801 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:22:13,802 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:22:13,802 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T12:22:13,802 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8825f29{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:22:13,803 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@718cd5f1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:22:13,872 WARN [Thread-1212 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/cluster_2568f704-5c37-f461-1921-6023ff8e1bdd/data/data1/current/BP-688210996-172.17.0.2-1733574133374/current, will proceed with Du for space computation calculation, 2024-12-07T12:22:13,872 WARN [Thread-1213 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/cluster_2568f704-5c37-f461-1921-6023ff8e1bdd/data/data2/current/BP-688210996-172.17.0.2-1733574133374/current, will proceed with Du for space computation calculation, 2024-12-07T12:22:13,891 WARN [Thread-1191 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T12:22:13,894 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x85fb322b45695355 with lease ID 0xa7ce7d421ca39b20: Processing first storage report for DS-a35f727f-c8b9-4651-ba3c-f3286805dda7 from datanode DatanodeRegistration(127.0.0.1:34651, datanodeUuid=cba2184d-b4b5-4a0d-9b40-8b5a5180d12f, infoPort=45401, infoSecurePort=0, ipcPort=43005, storageInfo=lv=-57;cid=testClusterID;nsid=349665632;c=1733574133374) 2024-12-07T12:22:13,894 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x85fb322b45695355 with lease ID 0xa7ce7d421ca39b20: from storage DS-a35f727f-c8b9-4651-ba3c-f3286805dda7 node DatanodeRegistration(127.0.0.1:34651, datanodeUuid=cba2184d-b4b5-4a0d-9b40-8b5a5180d12f, infoPort=45401, infoSecurePort=0, ipcPort=43005, storageInfo=lv=-57;cid=testClusterID;nsid=349665632;c=1733574133374), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:22:13,894 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x85fb322b45695355 with lease ID 0xa7ce7d421ca39b20: Processing first storage report for DS-7838bdac-74b2-417c-92f9-c7f09d4fe723 from datanode DatanodeRegistration(127.0.0.1:34651, datanodeUuid=cba2184d-b4b5-4a0d-9b40-8b5a5180d12f, infoPort=45401, infoSecurePort=0, ipcPort=43005, storageInfo=lv=-57;cid=testClusterID;nsid=349665632;c=1733574133374) 2024-12-07T12:22:13,894 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x85fb322b45695355 with lease ID 0xa7ce7d421ca39b20: from storage DS-7838bdac-74b2-417c-92f9-c7f09d4fe723 node DatanodeRegistration(127.0.0.1:34651, datanodeUuid=cba2184d-b4b5-4a0d-9b40-8b5a5180d12f, infoPort=45401, infoSecurePort=0, ipcPort=43005, storageInfo=lv=-57;cid=testClusterID;nsid=349665632;c=1733574133374), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:22:13,922 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2d10ba6a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/java.io.tmpdir/jetty-localhost-42899-hadoop-hdfs-3_4_1-tests_jar-_-any-11825321870463412235/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:22:13,923 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1b91ed3f{HTTP/1.1, (http/1.1)}{localhost:42899} 2024-12-07T12:22:13,923 INFO [Time-limited test {}] server.Server(415): Started @151341ms 2024-12-07T12:22:13,925 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-07T12:22:14,044 WARN [Thread-1239 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/cluster_2568f704-5c37-f461-1921-6023ff8e1bdd/data/data4/current/BP-688210996-172.17.0.2-1733574133374/current, will proceed with Du for space computation calculation, 2024-12-07T12:22:14,044 WARN [Thread-1238 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/cluster_2568f704-5c37-f461-1921-6023ff8e1bdd/data/data3/current/BP-688210996-172.17.0.2-1733574133374/current, will proceed with Du for space computation calculation, 2024-12-07T12:22:14,061 WARN [Thread-1227 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T12:22:14,064 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9c7a7219b40993ec with lease ID 0xa7ce7d421ca39b21: Processing first storage report for DS-83ed6f54-b3f8-4792-b9f6-eeb7a26393c1 from datanode DatanodeRegistration(127.0.0.1:37577, datanodeUuid=3b1a354f-4214-4b38-84a9-fe4ba2597d49, infoPort=43763, infoSecurePort=0, ipcPort=38853, storageInfo=lv=-57;cid=testClusterID;nsid=349665632;c=1733574133374) 2024-12-07T12:22:14,064 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9c7a7219b40993ec with lease ID 0xa7ce7d421ca39b21: from storage DS-83ed6f54-b3f8-4792-b9f6-eeb7a26393c1 node DatanodeRegistration(127.0.0.1:37577, datanodeUuid=3b1a354f-4214-4b38-84a9-fe4ba2597d49, infoPort=43763, infoSecurePort=0, ipcPort=38853, storageInfo=lv=-57;cid=testClusterID;nsid=349665632;c=1733574133374), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:22:14,064 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9c7a7219b40993ec with lease ID 0xa7ce7d421ca39b21: Processing first storage report for DS-901701bd-d3d0-4ba2-95c7-1e557bda66c2 from datanode DatanodeRegistration(127.0.0.1:37577, datanodeUuid=3b1a354f-4214-4b38-84a9-fe4ba2597d49, infoPort=43763, infoSecurePort=0, ipcPort=38853, storageInfo=lv=-57;cid=testClusterID;nsid=349665632;c=1733574133374) 2024-12-07T12:22:14,064 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9c7a7219b40993ec with lease ID 0xa7ce7d421ca39b21: from storage DS-901701bd-d3d0-4ba2-95c7-1e557bda66c2 node DatanodeRegistration(127.0.0.1:37577, datanodeUuid=3b1a354f-4214-4b38-84a9-fe4ba2597d49, infoPort=43763, infoSecurePort=0, ipcPort=38853, storageInfo=lv=-57;cid=testClusterID;nsid=349665632;c=1733574133374), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:22:14,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:14,158 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31 2024-12-07T12:22:14,161 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/cluster_2568f704-5c37-f461-1921-6023ff8e1bdd/zookeeper_0, clientPort=56615, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/cluster_2568f704-5c37-f461-1921-6023ff8e1bdd/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/cluster_2568f704-5c37-f461-1921-6023ff8e1bdd/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-07T12:22:14,162 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56615 2024-12-07T12:22:14,163 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:22:14,164 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:22:14,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): 
Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:22:14,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34651 is added to blk_1073741825_1001 (size=7) 2024-12-07T12:22:14,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37577 is added to blk_1073741825_1001 (size=7) 2024-12-07T12:22:14,175 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae with version=8 2024-12-07T12:22:14,175 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/hbase-staging 2024-12-07T12:22:14,177 INFO [Time-limited test {}] client.ConnectionUtils(128): master/27c6fcd7dac8:0 server-side Connection retries=45 2024-12-07T12:22:14,177 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:22:14,177 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T12:22:14,177 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T12:22:14,177 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:22:14,177 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T12:22:14,177 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-07T12:22:14,178 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T12:22:14,178 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36039 2024-12-07T12:22:14,180 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36039 connecting to ZooKeeper ensemble=127.0.0.1:56615 2024-12-07T12:22:14,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:360390x0, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T12:22:14,186 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36039-0x1018cdff4200000 connected 2024-12-07T12:22:14,202 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:22:14,203 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:22:14,206 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:22:14,206 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae, hbase.cluster.distributed=false 2024-12-07T12:22:14,208 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T12:22:14,208 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36039 2024-12-07T12:22:14,212 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36039 2024-12-07T12:22:14,214 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36039 2024-12-07T12:22:14,215 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36039 2024-12-07T12:22:14,216 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36039 2024-12-07T12:22:14,231 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/27c6fcd7dac8:0 server-side Connection retries=45 2024-12-07T12:22:14,231 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:22:14,231 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T12:22:14,231 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T12:22:14,231 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:22:14,231 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T12:22:14,232 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T12:22:14,232 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T12:22:14,232 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34911 2024-12-07T12:22:14,234 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34911 connecting to ZooKeeper ensemble=127.0.0.1:56615 2024-12-07T12:22:14,234 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
2024-12-07T12:22:14,236 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:22:14,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:349110x0, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T12:22:14,241 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:349110x0, quorum=127.0.0.1:56615, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:22:14,241 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34911-0x1018cdff4200001 connected 2024-12-07T12:22:14,241 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T12:22:14,241 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T12:22:14,242 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34911-0x1018cdff4200001, quorum=127.0.0.1:56615, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T12:22:14,243 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34911-0x1018cdff4200001, quorum=127.0.0.1:56615, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T12:22:14,244 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34911 2024-12-07T12:22:14,245 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34911 2024-12-07T12:22:14,246 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34911 2024-12-07T12:22:14,246 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34911 2024-12-07T12:22:14,247 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34911 2024-12-07T12:22:14,259 DEBUG [M:0;27c6fcd7dac8:36039 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;27c6fcd7dac8:36039 2024-12-07T12:22:14,260 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/27c6fcd7dac8,36039,1733574134177 2024-12-07T12:22:14,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34911-0x1018cdff4200001, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:22:14,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:22:14,262 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/27c6fcd7dac8,36039,1733574134177 2024-12-07T12:22:14,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:34911-0x1018cdff4200001, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T12:22:14,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:14,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34911-0x1018cdff4200001, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:14,264 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T12:22:14,264 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/27c6fcd7dac8,36039,1733574134177 from backup master directory 2024-12-07T12:22:14,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34911-0x1018cdff4200001, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:22:14,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/27c6fcd7dac8,36039,1733574134177 2024-12-07T12:22:14,267 WARN [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-07T12:22:14,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:22:14,267 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=27c6fcd7dac8,36039,1733574134177 2024-12-07T12:22:14,271 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/hbase.id] with ID: 08f5b7ec-c15a-4ba2-a8a4-ec4b59b52a45 2024-12-07T12:22:14,271 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/.tmp/hbase.id 2024-12-07T12:22:14,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37577 is added to blk_1073741826_1002 (size=42) 2024-12-07T12:22:14,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34651 is added to blk_1073741826_1002 (size=42) 2024-12-07T12:22:14,279 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/.tmp/hbase.id]:[hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/hbase.id] 2024-12-07T12:22:14,290 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:22:14,290 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-07T12:22:14,292 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-12-07T12:22:14,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:14,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34911-0x1018cdff4200001, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:14,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37577 is added to blk_1073741827_1003 (size=196) 2024-12-07T12:22:14,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34651 is added to blk_1073741827_1003 (size=196) 2024-12-07T12:22:14,303 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T12:22:14,304 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T12:22:14,304 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T12:22:14,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37577 is added to blk_1073741828_1004 (size=1189) 2024-12-07T12:22:14,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34651 is added to blk_1073741828_1004 (size=1189) 2024-12-07T12:22:14,315 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/data/master/store 2024-12-07T12:22:14,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37577 is added to blk_1073741829_1005 (size=34) 2024-12-07T12:22:14,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34651 is added to blk_1073741829_1005 (size=34) 2024-12-07T12:22:14,322 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:22:14,322 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T12:22:14,323 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:22:14,323 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:22:14,323 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T12:22:14,323 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:22:14,323 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-07T12:22:14,323 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733574134322Disabling compacts and flushes for region at 1733574134322Disabling writes for close at 1733574134323 (+1 ms)Writing region close event to WAL at 1733574134323Closed at 1733574134323 2024-12-07T12:22:14,324 WARN [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/data/master/store/.initializing 2024-12-07T12:22:14,324 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/WALs/27c6fcd7dac8,36039,1733574134177 2024-12-07T12:22:14,326 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=27c6fcd7dac8%2C36039%2C1733574134177, suffix=, logDir=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/WALs/27c6fcd7dac8,36039,1733574134177, archiveDir=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/oldWALs, maxLogs=10 2024-12-07T12:22:14,327 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C36039%2C1733574134177.1733574134326 2024-12-07T12:22:14,331 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/WALs/27c6fcd7dac8,36039,1733574134177/27c6fcd7dac8%2C36039%2C1733574134177.1733574134326 2024-12-07T12:22:14,332 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45401:45401),(127.0.0.1/127.0.0.1:43763:43763)] 2024-12-07T12:22:14,340 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:22:14,340 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:22:14,341 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:22:14,341 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:22:14,342 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:22:14,344 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T12:22:14,344 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:14,344 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:22:14,345 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:22:14,346 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T12:22:14,346 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:14,346 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:22:14,347 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:22:14,348 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T12:22:14,348 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:14,348 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:22:14,348 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:22:14,349 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T12:22:14,350 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:14,350 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:22:14,350 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:22:14,351 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:22:14,351 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:22:14,352 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:22:14,352 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:22:14,353 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T12:22:14,354 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:22:14,356 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:22:14,356 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=874389, jitterRate=0.11184392869472504}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T12:22:14,357 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733574134341Initializing all the Stores at 1733574134342 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574134342Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574134342Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574134342Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574134342Cleaning up temporary data from old regions at 1733574134352 (+10 ms)Region opened successfully at 1733574134357 (+5 ms) 2024-12-07T12:22:14,358 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-07T12:22:14,361 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70058b3e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=27c6fcd7dac8/172.17.0.2:0 2024-12-07T12:22:14,362 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-07T12:22:14,362 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T12:22:14,362 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T12:22:14,362 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T12:22:14,363 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-07T12:22:14,363 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-07T12:22:14,363 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T12:22:14,365 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-07T12:22:14,366 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T12:22:14,367 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-07T12:22:14,367 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T12:22:14,368 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T12:22:14,369 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-07T12:22:14,369 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T12:22:14,370 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T12:22:14,371 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-07T12:22:14,372 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T12:22:14,375 DEBUG 
[master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T12:22:14,377 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T12:22:14,378 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T12:22:14,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34911-0x1018cdff4200001, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T12:22:14,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T12:22:14,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34911-0x1018cdff4200001, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:14,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:14,380 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=27c6fcd7dac8,36039,1733574134177, sessionid=0x1018cdff4200000, setting cluster-up flag (Was=false) 2024-12-07T12:22:14,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:14,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34911-0x1018cdff4200001, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:14,389 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T12:22:14,390 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=27c6fcd7dac8,36039,1733574134177 2024-12-07T12:22:14,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:14,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34911-0x1018cdff4200001, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:14,400 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T12:22:14,401 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=27c6fcd7dac8,36039,1733574134177 2024-12-07T12:22:14,402 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-07T12:22:14,403 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-07T12:22:14,404 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-07T12:22:14,404 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-07T12:22:14,404 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 27c6fcd7dac8,36039,1733574134177 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T12:22:14,405 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:22:14,405 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:22:14,405 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:22:14,405 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:22:14,405 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/27c6fcd7dac8:0, corePoolSize=10, maxPoolSize=10 2024-12-07T12:22:14,405 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:22:14,405 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=2, maxPoolSize=2 2024-12-07T12:22:14,405 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=1, 
maxPoolSize=1 2024-12-07T12:22:14,407 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733574164407 2024-12-07T12:22:14,407 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-07T12:22:14,407 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T12:22:14,407 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T12:22:14,407 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T12:22:14,407 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T12:22:14,407 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T12:22:14,407 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T12:22:14,407 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-07T12:22:14,407 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-07T12:22:14,408 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T12:22:14,408 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T12:22:14,408 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T12:22:14,408 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T12:22:14,408 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T12:22:14,409 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:14,409 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T12:22:14,410 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.large.0-1733574134408,5,FailOnTimeoutGroup] 2024-12-07T12:22:14,411 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.small.0-1733574134410,5,FailOnTimeoutGroup] 2024-12-07T12:22:14,411 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:14,411 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-07T12:22:14,411 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:14,411 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:14,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37577 is added to blk_1073741831_1007 (size=1321) 2024-12-07T12:22:14,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34651 is added to blk_1073741831_1007 (size=1321) 2024-12-07T12:22:14,419 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-07T12:22:14,419 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae 2024-12-07T12:22:14,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34651 is added to blk_1073741832_1008 (size=32) 2024-12-07T12:22:14,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37577 is added to blk_1073741832_1008 (size=32) 2024-12-07T12:22:14,430 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:22:14,432 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T12:22:14,434 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T12:22:14,434 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:14,434 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:22:14,434 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T12:22:14,435 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T12:22:14,435 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:14,436 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:22:14,436 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T12:22:14,437 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T12:22:14,437 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:14,438 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:22:14,438 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T12:22:14,439 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T12:22:14,439 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:14,439 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:22:14,439 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T12:22:14,440 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/hbase/meta/1588230740 2024-12-07T12:22:14,440 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/hbase/meta/1588230740 2024-12-07T12:22:14,442 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T12:22:14,442 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T12:22:14,442 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T12:22:14,443 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T12:22:14,445 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:22:14,445 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=868992, jitterRate=0.10498106479644775}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T12:22:14,446 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733574134430Initializing all the Stores at 1733574134431 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574134431Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574134432 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574134432Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574134432Cleaning up temporary data from old regions at 1733574134442 (+10 ms)Region opened successfully at 1733574134446 (+4 ms) 2024-12-07T12:22:14,446 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T12:22:14,446 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T12:22:14,446 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T12:22:14,446 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T12:22:14,446 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T12:22:14,446 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T12:22:14,447 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733574134446Disabling compacts and flushes for region at 
1733574134446Disabling writes for close at 1733574134446Writing region close event to WAL at 1733574134446Closed at 1733574134446 2024-12-07T12:22:14,448 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T12:22:14,448 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-07T12:22:14,448 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T12:22:14,449 INFO [RS:0;27c6fcd7dac8:34911 {}] regionserver.HRegionServer(746): ClusterId : 08f5b7ec-c15a-4ba2-a8a4-ec4b59b52a45 2024-12-07T12:22:14,449 DEBUG [RS:0;27c6fcd7dac8:34911 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T12:22:14,449 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T12:22:14,450 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T12:22:14,451 DEBUG [RS:0;27c6fcd7dac8:34911 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T12:22:14,451 DEBUG [RS:0;27c6fcd7dac8:34911 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T12:22:14,453 DEBUG [RS:0;27c6fcd7dac8:34911 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T12:22:14,453 DEBUG [RS:0;27c6fcd7dac8:34911 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5eb8e006, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=27c6fcd7dac8/172.17.0.2:0 2024-12-07T12:22:14,466 DEBUG [RS:0;27c6fcd7dac8:34911 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;27c6fcd7dac8:34911 2024-12-07T12:22:14,466 INFO [RS:0;27c6fcd7dac8:34911 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T12:22:14,466 INFO [RS:0;27c6fcd7dac8:34911 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T12:22:14,466 DEBUG [RS:0;27c6fcd7dac8:34911 {}] regionserver.HRegionServer(832): About to register with Master. 
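The master startup above notes that reopening regions with a very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is given a value greater than 0. A hedged sketch of turning it on before the cluster starts; the threshold of 3 is an arbitrary illustration, not a recommendation:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class EnableRefCountRecovery {
        public static Configuration build() {
            Configuration conf = HBaseConfiguration.create();
            // Per the HMaster message above, the feature stays off unless this value is > 0.
            conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
            return conf;
        }
    }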
2024-12-07T12:22:14,467 INFO [RS:0;27c6fcd7dac8:34911 {}] regionserver.HRegionServer(2659): reportForDuty to master=27c6fcd7dac8,36039,1733574134177 with port=34911, startcode=1733574134231 2024-12-07T12:22:14,467 DEBUG [RS:0;27c6fcd7dac8:34911 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T12:22:14,469 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36907, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T12:22:14,469 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36039 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 27c6fcd7dac8,34911,1733574134231 2024-12-07T12:22:14,469 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36039 {}] master.ServerManager(517): Registering regionserver=27c6fcd7dac8,34911,1733574134231 2024-12-07T12:22:14,471 DEBUG [RS:0;27c6fcd7dac8:34911 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae 2024-12-07T12:22:14,471 DEBUG [RS:0;27c6fcd7dac8:34911 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42733 2024-12-07T12:22:14,471 DEBUG [RS:0;27c6fcd7dac8:34911 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T12:22:14,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T12:22:14,473 DEBUG [RS:0;27c6fcd7dac8:34911 {}] zookeeper.ZKUtil(111): regionserver:34911-0x1018cdff4200001, quorum=127.0.0.1:56615, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/27c6fcd7dac8,34911,1733574134231 2024-12-07T12:22:14,473 WARN [RS:0;27c6fcd7dac8:34911 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T12:22:14,473 INFO [RS:0;27c6fcd7dac8:34911 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T12:22:14,473 DEBUG [RS:0;27c6fcd7dac8:34911 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231 2024-12-07T12:22:14,473 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [27c6fcd7dac8,34911,1733574134231] 2024-12-07T12:22:14,476 INFO [RS:0;27c6fcd7dac8:34911 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T12:22:14,478 INFO [RS:0;27c6fcd7dac8:34911 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T12:22:14,478 INFO [RS:0;27c6fcd7dac8:34911 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T12:22:14,478 INFO [RS:0;27c6fcd7dac8:34911 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
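The PressureAwareCompactionThroughputController line above reports a 50.00–100.00 MB/second throughput band with a 60000 ms tuning period. If a run needed a wider band, the usual knobs are the hbase.hstore.compaction.throughput.* properties; the key names below are stated from memory and should be checked against the HBase version in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputTuning {
        public static Configuration build() {
            Configuration conf = HBaseConfiguration.create();
            // Double the band reported above (lower 50 MB/s, higher 100 MB/s) to 100/200 MB/s.
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 100L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 200L * 1024 * 1024);
            return conf;
        }
    }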
2024-12-07T12:22:14,478 INFO [RS:0;27c6fcd7dac8:34911 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T12:22:14,479 INFO [RS:0;27c6fcd7dac8:34911 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T12:22:14,479 INFO [RS:0;27c6fcd7dac8:34911 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:14,480 DEBUG [RS:0;27c6fcd7dac8:34911 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:22:14,480 DEBUG [RS:0;27c6fcd7dac8:34911 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:22:14,480 DEBUG [RS:0;27c6fcd7dac8:34911 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:22:14,480 DEBUG [RS:0;27c6fcd7dac8:34911 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:22:14,480 DEBUG [RS:0;27c6fcd7dac8:34911 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:22:14,480 DEBUG [RS:0;27c6fcd7dac8:34911 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/27c6fcd7dac8:0, corePoolSize=2, maxPoolSize=2 2024-12-07T12:22:14,480 DEBUG [RS:0;27c6fcd7dac8:34911 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:22:14,480 DEBUG [RS:0;27c6fcd7dac8:34911 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:22:14,480 DEBUG [RS:0;27c6fcd7dac8:34911 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:22:14,480 DEBUG [RS:0;27c6fcd7dac8:34911 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:22:14,480 DEBUG [RS:0;27c6fcd7dac8:34911 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:22:14,480 DEBUG [RS:0;27c6fcd7dac8:34911 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:22:14,480 DEBUG [RS:0;27c6fcd7dac8:34911 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/27c6fcd7dac8:0, corePoolSize=3, maxPoolSize=3 2024-12-07T12:22:14,481 DEBUG [RS:0;27c6fcd7dac8:34911 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0, corePoolSize=3, maxPoolSize=3 2024-12-07T12:22:14,483 INFO [RS:0;27c6fcd7dac8:34911 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-07T12:22:14,483 INFO [RS:0;27c6fcd7dac8:34911 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:14,483 INFO [RS:0;27c6fcd7dac8:34911 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:14,483 INFO [RS:0;27c6fcd7dac8:34911 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:14,484 INFO [RS:0;27c6fcd7dac8:34911 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:14,484 INFO [RS:0;27c6fcd7dac8:34911 {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,34911,1733574134231-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T12:22:14,499 INFO [RS:0;27c6fcd7dac8:34911 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T12:22:14,499 INFO [RS:0;27c6fcd7dac8:34911 {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,34911,1733574134231-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:14,499 INFO [RS:0;27c6fcd7dac8:34911 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:14,499 INFO [RS:0;27c6fcd7dac8:34911 {}] regionserver.Replication(171): 27c6fcd7dac8,34911,1733574134231 started 2024-12-07T12:22:14,513 INFO [RS:0;27c6fcd7dac8:34911 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:14,514 INFO [RS:0;27c6fcd7dac8:34911 {}] regionserver.HRegionServer(1482): Serving as 27c6fcd7dac8,34911,1733574134231, RpcServer on 27c6fcd7dac8/172.17.0.2:34911, sessionid=0x1018cdff4200001 2024-12-07T12:22:14,514 DEBUG [RS:0;27c6fcd7dac8:34911 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T12:22:14,514 DEBUG [RS:0;27c6fcd7dac8:34911 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 27c6fcd7dac8,34911,1733574134231 2024-12-07T12:22:14,514 DEBUG [RS:0;27c6fcd7dac8:34911 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '27c6fcd7dac8,34911,1733574134231' 2024-12-07T12:22:14,514 DEBUG [RS:0;27c6fcd7dac8:34911 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T12:22:14,514 DEBUG [RS:0;27c6fcd7dac8:34911 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T12:22:14,515 DEBUG [RS:0;27c6fcd7dac8:34911 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T12:22:14,515 DEBUG [RS:0;27c6fcd7dac8:34911 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T12:22:14,515 DEBUG [RS:0;27c6fcd7dac8:34911 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 27c6fcd7dac8,34911,1733574134231 2024-12-07T12:22:14,515 DEBUG [RS:0;27c6fcd7dac8:34911 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '27c6fcd7dac8,34911,1733574134231' 2024-12-07T12:22:14,515 DEBUG [RS:0;27c6fcd7dac8:34911 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T12:22:14,515 DEBUG 
[RS:0;27c6fcd7dac8:34911 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T12:22:14,515 DEBUG [RS:0;27c6fcd7dac8:34911 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T12:22:14,515 INFO [RS:0;27c6fcd7dac8:34911 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T12:22:14,515 INFO [RS:0;27c6fcd7dac8:34911 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T12:22:14,601 WARN [27c6fcd7dac8:36039 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-07T12:22:14,617 INFO [RS:0;27c6fcd7dac8:34911 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=27c6fcd7dac8%2C34911%2C1733574134231, suffix=, logDir=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231, archiveDir=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/oldWALs, maxLogs=32 2024-12-07T12:22:14,618 INFO [RS:0;27c6fcd7dac8:34911 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C34911%2C1733574134231.1733574134618 2024-12-07T12:22:14,624 INFO [RS:0;27c6fcd7dac8:34911 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574134618 2024-12-07T12:22:14,625 DEBUG [RS:0;27c6fcd7dac8:34911 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45401:45401),(127.0.0.1/127.0.0.1:43763:43763)] 2024-12-07T12:22:14,851 DEBUG [27c6fcd7dac8:36039 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-07T12:22:14,852 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=27c6fcd7dac8,34911,1733574134231 2024-12-07T12:22:14,853 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 27c6fcd7dac8,34911,1733574134231, state=OPENING 2024-12-07T12:22:14,856 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T12:22:14,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:14,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34911-0x1018cdff4200001, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:14,857 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T12:22:14,858 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:22:14,858 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=27c6fcd7dac8,34911,1733574134231}] 2024-12-07T12:22:14,858 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:22:15,011 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T12:22:15,013 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40491, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T12:22:15,016 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-07T12:22:15,016 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T12:22:15,018 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=27c6fcd7dac8%2C34911%2C1733574134231.meta, suffix=.meta, logDir=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231, archiveDir=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/oldWALs, maxLogs=32 2024-12-07T12:22:15,018 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C34911%2C1733574134231.meta.1733574135018.meta 2024-12-07T12:22:15,023 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.meta.1733574135018.meta 2024-12-07T12:22:15,024 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45401:45401),(127.0.0.1/127.0.0.1:43763:43763)] 2024-12-07T12:22:15,030 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:22:15,031 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T12:22:15,031 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T12:22:15,031 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-07T12:22:15,031 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T12:22:15,031 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:22:15,031 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-07T12:22:15,031 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-07T12:22:15,032 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T12:22:15,033 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T12:22:15,033 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:15,034 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:22:15,034 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T12:22:15,034 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T12:22:15,034 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:15,035 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:22:15,035 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T12:22:15,036 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T12:22:15,036 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:15,036 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:22:15,036 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T12:22:15,037 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T12:22:15,037 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:15,037 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
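The store-open blocks above all describe the same column-family shape for hbase:meta: BLOOMFILTER ROWCOL, IN_MEMORY true, DATA_BLOCK_ENCODING ROW_INDEX_V1, three versions, and an 8 KB block size for 'info'. A sketch of expressing that shape with the public descriptor builders; 'example' is a made-up table name, and this only illustrates the builder API, not how the meta descriptor is actually constructed internally:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InfoFamilyShape {
        public static TableDescriptor build() {
            // Same settings the log prints for the 'info' family of region 1588230740.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setMaxVersions(3)
                .setBlocksize(8192)
                .build();
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
                .setColumnFamily(info)
                .build();
        }
    }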
2024-12-07T12:22:15,037 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T12:22:15,038 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/hbase/meta/1588230740 2024-12-07T12:22:15,039 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/hbase/meta/1588230740 2024-12-07T12:22:15,040 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T12:22:15,040 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T12:22:15,041 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T12:22:15,042 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T12:22:15,043 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=820455, jitterRate=0.04326292872428894}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T12:22:15,043 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-07T12:22:15,044 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733574135031Writing region info on filesystem at 1733574135031Initializing all the Stores at 1733574135032 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574135032Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574135032Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574135032Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574135032Cleaning up temporary data from old regions at 1733574135040 (+8 ms)Running coprocessor post-open hooks at 1733574135043 (+3 ms)Region opened successfully at 1733574135044 (+1 ms) 2024-12-07T12:22:15,045 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733574135010 2024-12-07T12:22:15,047 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T12:22:15,047 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-07T12:22:15,048 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=27c6fcd7dac8,34911,1733574134231 2024-12-07T12:22:15,049 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 27c6fcd7dac8,34911,1733574134231, state=OPEN 2024-12-07T12:22:15,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T12:22:15,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34911-0x1018cdff4200001, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T12:22:15,054 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=27c6fcd7dac8,34911,1733574134231 2024-12-07T12:22:15,054 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:22:15,054 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:22:15,057 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-07T12:22:15,057 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=27c6fcd7dac8,34911,1733574134231 in 196 msec 2024-12-07T12:22:15,059 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-07T12:22:15,060 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 609 msec 2024-12-07T12:22:15,060 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T12:22:15,060 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-07T12:22:15,062 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T12:22:15,062 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=27c6fcd7dac8,34911,1733574134231, seqNum=-1] 2024-12-07T12:22:15,062 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T12:22:15,063 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45299, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T12:22:15,069 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 665 msec 2024-12-07T12:22:15,069 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733574135069, completionTime=-1 2024-12-07T12:22:15,069 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-07T12:22:15,069 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-07T12:22:15,071 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-07T12:22:15,071 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733574195071 2024-12-07T12:22:15,071 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733574255071 2024-12-07T12:22:15,071 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-07T12:22:15,071 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,36039,1733574134177-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:15,071 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,36039,1733574134177-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:15,071 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,36039,1733574134177-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:15,071 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-27c6fcd7dac8:36039, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T12:22:15,071 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:15,071 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:15,073 DEBUG [master/27c6fcd7dac8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-07T12:22:15,075 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.808sec 2024-12-07T12:22:15,075 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T12:22:15,075 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T12:22:15,075 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T12:22:15,075 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-07T12:22:15,075 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T12:22:15,076 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,36039,1733574134177-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T12:22:15,076 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,36039,1733574134177-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T12:22:15,078 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-07T12:22:15,078 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T12:22:15,078 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,36039,1733574134177-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T12:22:15,149 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d3d6246, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T12:22:15,149 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 27c6fcd7dac8,36039,-1 for getting cluster id 2024-12-07T12:22:15,150 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-07T12:22:15,151 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '08f5b7ec-c15a-4ba2-a8a4-ec4b59b52a45' 2024-12-07T12:22:15,152 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-07T12:22:15,152 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "08f5b7ec-c15a-4ba2-a8a4-ec4b59b52a45" 2024-12-07T12:22:15,152 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2356e2a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T12:22:15,152 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [27c6fcd7dac8,36039,-1] 2024-12-07T12:22:15,152 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-07T12:22:15,153 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:22:15,154 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35738, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-07T12:22:15,155 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@370b05c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T12:22:15,156 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T12:22:15,157 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=27c6fcd7dac8,34911,1733574134231, seqNum=-1] 2024-12-07T12:22:15,157 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T12:22:15,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:15,159 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53078, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T12:22:15,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=27c6fcd7dac8,36039,1733574134177 2024-12-07T12:22:15,161 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:22:15,164 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-07T12:22:15,164 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-12-07T12:22:15,164 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-12-07T12:22:15,164 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-07T12:22:15,165 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 27c6fcd7dac8,36039,1733574134177 2024-12-07T12:22:15,165 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4040ae33 2024-12-07T12:22:15,165 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T12:22:15,167 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35746, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins 
(auth:SIMPLE), service=MasterService 2024-12-07T12:22:15,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:15,167 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-07T12:22:15,167 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
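The two TableDescriptorChecker warnings above fire because the test deliberately uses very small region and memstore limits (786432 bytes max file size, 8192 bytes flush size) so that log rolling and splitting can be exercised quickly. The create request logged just below corresponds roughly to the following client-side call; whether those limits are set on the table descriptor or via "hbase.hregion.max.filesize" / "hbase.hregion.memstore.flush.size" in the cluster configuration is not visible from this log, so the per-descriptor form here is an assumption for illustration, as are the connection setup and class name.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.createTable(
          TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
              // One 'info' family keeping a single version, matching the descriptor logged below.
              .setColumnFamily(
                  ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                      .setMaxVersions(1)
                      .build())
              // Deliberately tiny limits, matching the 786432 / 8192 values that
              // triggered the TableDescriptorChecker warnings above.
              .setMaxFileSize(786432L)
              .setMemStoreFlushSize(8192L)
              .build());
    }
  }
}
```

On the server side this request becomes the pid=4 CreateTableProcedure traced below: write the table's FS layout, add the new region to hbase:meta, and assign it to a region server.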
2024-12-07T12:22:15,168 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T12:22:15,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-12-07T12:22:15,170 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T12:22:15,170 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:15,170 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-12-07T12:22:15,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T12:22:15,172 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T12:22:15,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37577 is added to blk_1073741835_1011 (size=395) 2024-12-07T12:22:15,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34651 is added to blk_1073741835_1011 (size=395) 2024-12-07T12:22:15,180 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => fd2dbe343c320aef54762bf37e94eca0, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733574135167.fd2dbe343c320aef54762bf37e94eca0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae 2024-12-07T12:22:15,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37577 is added to blk_1073741836_1012 (size=78) 2024-12-07T12:22:15,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34651 is added to blk_1073741836_1012 (size=78) 2024-12-07T12:22:15,187 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733574135167.fd2dbe343c320aef54762bf37e94eca0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:22:15,187 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing fd2dbe343c320aef54762bf37e94eca0, disabling compactions & flushes 2024-12-07T12:22:15,187 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733574135167.fd2dbe343c320aef54762bf37e94eca0. 2024-12-07T12:22:15,187 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733574135167.fd2dbe343c320aef54762bf37e94eca0. 2024-12-07T12:22:15,187 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733574135167.fd2dbe343c320aef54762bf37e94eca0. after waiting 0 ms 2024-12-07T12:22:15,187 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733574135167.fd2dbe343c320aef54762bf37e94eca0. 2024-12-07T12:22:15,187 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733574135167.fd2dbe343c320aef54762bf37e94eca0. 2024-12-07T12:22:15,187 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for fd2dbe343c320aef54762bf37e94eca0: Waiting for close lock at 1733574135187Disabling compacts and flushes for region at 1733574135187Disabling writes for close at 1733574135187Writing region close event to WAL at 1733574135187Closed at 1733574135187 2024-12-07T12:22:15,188 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T12:22:15,188 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1733574135167.fd2dbe343c320aef54762bf37e94eca0.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1733574135188"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733574135188"}]},"ts":"1733574135188"} 2024-12-07T12:22:15,191 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-07T12:22:15,192 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T12:22:15,192 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733574135192"}]},"ts":"1733574135192"} 2024-12-07T12:22:15,194 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-12-07T12:22:15,194 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=fd2dbe343c320aef54762bf37e94eca0, ASSIGN}] 2024-12-07T12:22:15,195 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=fd2dbe343c320aef54762bf37e94eca0, ASSIGN 2024-12-07T12:22:15,196 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=fd2dbe343c320aef54762bf37e94eca0, ASSIGN; state=OFFLINE, location=27c6fcd7dac8,34911,1733574134231; forceNewPlan=false, retain=false 2024-12-07T12:22:15,347 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=fd2dbe343c320aef54762bf37e94eca0, regionState=OPENING, regionLocation=27c6fcd7dac8,34911,1733574134231 2024-12-07T12:22:15,350 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=fd2dbe343c320aef54762bf37e94eca0, ASSIGN because future has completed 2024-12-07T12:22:15,350 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure fd2dbe343c320aef54762bf37e94eca0, server=27c6fcd7dac8,34911,1733574134231}] 2024-12-07T12:22:15,507 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1733574135167.fd2dbe343c320aef54762bf37e94eca0. 
2024-12-07T12:22:15,508 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => fd2dbe343c320aef54762bf37e94eca0, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733574135167.fd2dbe343c320aef54762bf37e94eca0.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:22:15,508 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart fd2dbe343c320aef54762bf37e94eca0 2024-12-07T12:22:15,508 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733574135167.fd2dbe343c320aef54762bf37e94eca0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:22:15,508 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for fd2dbe343c320aef54762bf37e94eca0 2024-12-07T12:22:15,508 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for fd2dbe343c320aef54762bf37e94eca0 2024-12-07T12:22:15,509 INFO [StoreOpener-fd2dbe343c320aef54762bf37e94eca0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region fd2dbe343c320aef54762bf37e94eca0 2024-12-07T12:22:15,511 INFO [StoreOpener-fd2dbe343c320aef54762bf37e94eca0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fd2dbe343c320aef54762bf37e94eca0 columnFamilyName info 2024-12-07T12:22:15,511 DEBUG [StoreOpener-fd2dbe343c320aef54762bf37e94eca0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:15,511 INFO [StoreOpener-fd2dbe343c320aef54762bf37e94eca0-1 {}] regionserver.HStore(327): Store=fd2dbe343c320aef54762bf37e94eca0/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:22:15,511 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for fd2dbe343c320aef54762bf37e94eca0 2024-12-07T12:22:15,512 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/default/TestLogRolling-testLogRollOnPipelineRestart/fd2dbe343c320aef54762bf37e94eca0 2024-12-07T12:22:15,512 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/default/TestLogRolling-testLogRollOnPipelineRestart/fd2dbe343c320aef54762bf37e94eca0 2024-12-07T12:22:15,513 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for fd2dbe343c320aef54762bf37e94eca0 2024-12-07T12:22:15,513 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for fd2dbe343c320aef54762bf37e94eca0 2024-12-07T12:22:15,514 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for fd2dbe343c320aef54762bf37e94eca0 2024-12-07T12:22:15,516 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/default/TestLogRolling-testLogRollOnPipelineRestart/fd2dbe343c320aef54762bf37e94eca0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:22:15,517 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened fd2dbe343c320aef54762bf37e94eca0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=877984, jitterRate=0.11641520261764526}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T12:22:15,517 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for fd2dbe343c320aef54762bf37e94eca0 2024-12-07T12:22:15,517 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for fd2dbe343c320aef54762bf37e94eca0: Running coprocessor pre-open hook at 1733574135508Writing region info on filesystem at 1733574135508Initializing all the Stores at 1733574135509 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574135509Cleaning up temporary data from old regions at 1733574135513 (+4 ms)Running coprocessor post-open hooks at 1733574135517 (+4 ms)Region opened successfully at 1733574135517 2024-12-07T12:22:15,519 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1733574135167.fd2dbe343c320aef54762bf37e94eca0., pid=6, masterSystemTime=1733574135503 2024-12-07T12:22:15,521 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1733574135167.fd2dbe343c320aef54762bf37e94eca0. 2024-12-07T12:22:15,521 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1733574135167.fd2dbe343c320aef54762bf37e94eca0. 2024-12-07T12:22:15,522 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=fd2dbe343c320aef54762bf37e94eca0, regionState=OPEN, openSeqNum=2, regionLocation=27c6fcd7dac8,34911,1733574134231 2024-12-07T12:22:15,525 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure fd2dbe343c320aef54762bf37e94eca0, server=27c6fcd7dac8,34911,1733574134231 because future has completed 2024-12-07T12:22:15,529 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-07T12:22:15,529 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure fd2dbe343c320aef54762bf37e94eca0, server=27c6fcd7dac8,34911,1733574134231 in 176 msec 2024-12-07T12:22:15,531 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-07T12:22:15,531 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=fd2dbe343c320aef54762bf37e94eca0, ASSIGN in 335 msec 2024-12-07T12:22:15,532 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T12:22:15,532 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733574135532"}]},"ts":"1733574135532"} 2024-12-07T12:22:15,534 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-12-07T12:22:15,535 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T12:22:15,537 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 367 msec 2024-12-07T12:22:16,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:16,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:17,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:17,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:18,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:18,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:22:18,276 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-07T12:22:18,276 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-07T12:22:18,277 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-07T12:22:18,277 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-12-07T12:22:18,278 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T12:22:18,278 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-07T12:22:19,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:19,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:20,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:20,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:20,533 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-07T12:22:20,549 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:20,549 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:20,550 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:20,550 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:20,550 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:20,551 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:20,554 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:20,554 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:20,555 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:20,557 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:20,562 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-07T12:22:20,562 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-12-07T12:22:21,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:21,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:22,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:22,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:23,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:23,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:24,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:24,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:25,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:25,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:25,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36039 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T12:22:25,246 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-12-07T12:22:25,247 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-12-07T12:22:25,250 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-12-07T12:22:25,250 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1733574135167.fd2dbe343c320aef54762bf37e94eca0. 2024-12-07T12:22:25,253 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1733574135167.fd2dbe343c320aef54762bf37e94eca0., hostname=27c6fcd7dac8,34911,1733574134231, seqNum=2] 2024-12-07T12:22:26,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:26,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:27,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:27,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:27,256 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574134618 2024-12-07T12:22:27,257 WARN [ResponseProcessor for block BP-688210996-172.17.0.2-1733574133374:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-688210996-172.17.0.2-1733574133374:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-688210996-172.17.0.2-1733574133374:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:37577,DS-83ed6f54-b3f8-4792-b9f6-eeb7a26393c1,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:27,257 WARN [ResponseProcessor for block BP-688210996-172.17.0.2-1733574133374:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-688210996-172.17.0.2-1733574133374:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-688210996-172.17.0.2-1733574133374:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:37577,DS-83ed6f54-b3f8-4792-b9f6-eeb7a26393c1,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:22:27,257 WARN [ResponseProcessor for block BP-688210996-172.17.0.2-1733574133374:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-688210996-172.17.0.2-1733574133374:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-688210996-172.17.0.2-1733574133374:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:37577,DS-83ed6f54-b3f8-4792-b9f6-eeb7a26393c1,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:27,257 WARN [DataStreamer for file /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.meta.1733574135018.meta block BP-688210996-172.17.0.2-1733574133374:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-688210996-172.17.0.2-1733574133374:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34651,DS-a35f727f-c8b9-4651-ba3c-f3286805dda7,DISK], DatanodeInfoWithStorage[127.0.0.1:37577,DS-83ed6f54-b3f8-4792-b9f6-eeb7a26393c1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37577,DS-83ed6f54-b3f8-4792-b9f6-eeb7a26393c1,DISK]) is bad. 2024-12-07T12:22:27,257 WARN [DataStreamer for file /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/WALs/27c6fcd7dac8,36039,1733574134177/27c6fcd7dac8%2C36039%2C1733574134177.1733574134326 block BP-688210996-172.17.0.2-1733574133374:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-688210996-172.17.0.2-1733574133374:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34651,DS-a35f727f-c8b9-4651-ba3c-f3286805dda7,DISK], DatanodeInfoWithStorage[127.0.0.1:37577,DS-83ed6f54-b3f8-4792-b9f6-eeb7a26393c1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37577,DS-83ed6f54-b3f8-4792-b9f6-eeb7a26393c1,DISK]) is bad. 2024-12-07T12:22:27,257 WARN [PacketResponder: BP-688210996-172.17.0.2-1733574133374:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:37577] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:27,257 WARN [PacketResponder: BP-688210996-172.17.0.2-1733574133374:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:37577] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:27,257 WARN [PacketResponder: BP-688210996-172.17.0.2-1733574133374:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:37577] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:27,258 WARN [DataStreamer for file /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574134618 block BP-688210996-172.17.0.2-1733574133374:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-688210996-172.17.0.2-1733574133374:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34651,DS-a35f727f-c8b9-4651-ba3c-f3286805dda7,DISK], DatanodeInfoWithStorage[127.0.0.1:37577,DS-83ed6f54-b3f8-4792-b9f6-eeb7a26393c1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37577,DS-83ed6f54-b3f8-4792-b9f6-eeb7a26393c1,DISK]) is bad. 2024-12-07T12:22:27,258 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-247609923_22 at /127.0.0.1:57406 [Receiving block BP-688210996-172.17.0.2-1733574133374:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34651:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57406 dst: /127.0.0.1:34651 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:27,258 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_797968841_22 at /127.0.0.1:57444 [Receiving block BP-688210996-172.17.0.2-1733574133374:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34651:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57444 dst: /127.0.0.1:34651 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:27,258 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_797968841_22 at /127.0.0.1:47098 [Receiving block BP-688210996-172.17.0.2-1733574133374:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37577:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47098 dst: /127.0.0.1:37577 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:27,258 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-247609923_22 at /127.0.0.1:47060 [Receiving block BP-688210996-172.17.0.2-1733574133374:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37577:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47060 dst: /127.0.0.1:37577 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T12:22:27,258 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_797968841_22 at /127.0.0.1:47092 [Receiving block BP-688210996-172.17.0.2-1733574133374:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37577:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47092 dst: /127.0.0.1:37577 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:27,258 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_797968841_22 at /127.0.0.1:57438 [Receiving block BP-688210996-172.17.0.2-1733574133374:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34651:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57438 dst: /127.0.0.1:34651 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:27,262 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2d10ba6a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:22:27,263 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1b91ed3f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:22:27,263 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:22:27,263 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@718cd5f1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:22:27,263 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8825f29{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/hadoop.log.dir/,STOPPED} 2024-12-07T12:22:27,264 WARN [BP-688210996-172.17.0.2-1733574133374 heartbeating to localhost/127.0.0.1:42733 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T12:22:27,264 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T12:22:27,264 WARN [BP-688210996-172.17.0.2-1733574133374 heartbeating to localhost/127.0.0.1:42733 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-688210996-172.17.0.2-1733574133374 (Datanode Uuid 3b1a354f-4214-4b38-84a9-fe4ba2597d49) service to localhost/127.0.0.1:42733 2024-12-07T12:22:27,264 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T12:22:27,264 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/cluster_2568f704-5c37-f461-1921-6023ff8e1bdd/data/data3/current/BP-688210996-172.17.0.2-1733574133374 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:22:27,265 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/cluster_2568f704-5c37-f461-1921-6023ff8e1bdd/data/data4/current/BP-688210996-172.17.0.2-1733574133374 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:22:27,265 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T12:22:27,284 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:22:27,288 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:22:27,289 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:22:27,289 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:22:27,289 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T12:22:27,290 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@718ea2f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:22:27,290 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@167fd01b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:22:27,407 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2ca8564b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/java.io.tmpdir/jetty-localhost-44915-hadoop-hdfs-3_4_1-tests_jar-_-any-16324312556085427192/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:22:27,407 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@fa662a{HTTP/1.1, 
(http/1.1)}{localhost:44915} 2024-12-07T12:22:27,407 INFO [Time-limited test {}] server.Server(415): Started @164825ms 2024-12-07T12:22:27,408 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T12:22:27,426 WARN [ResponseProcessor for block BP-688210996-172.17.0.2-1733574133374:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-688210996-172.17.0.2-1733574133374:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:27,426 WARN [ResponseProcessor for block BP-688210996-172.17.0.2-1733574133374:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-688210996-172.17.0.2-1733574133374:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:27,426 WARN [ResponseProcessor for block BP-688210996-172.17.0.2-1733574133374:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-688210996-172.17.0.2-1733574133374:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:27,427 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-247609923_22 at /127.0.0.1:50174 [Receiving block BP-688210996-172.17.0.2-1733574133374:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34651:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50174 dst: /127.0.0.1:34651 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:27,427 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_797968841_22 at /127.0.0.1:50168 [Receiving block BP-688210996-172.17.0.2-1733574133374:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34651:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50168 dst: /127.0.0.1:34651 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:27,427 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_797968841_22 at /127.0.0.1:50172 [Receiving block BP-688210996-172.17.0.2-1733574133374:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34651:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50172 dst: /127.0.0.1:34651 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T12:22:27,435 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@16178224{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:22:27,435 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@34c1099f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:22:27,435 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:22:27,436 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@719d6bc4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:22:27,436 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30a928dc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/hadoop.log.dir/,STOPPED} 2024-12-07T12:22:27,437 WARN [BP-688210996-172.17.0.2-1733574133374 heartbeating to localhost/127.0.0.1:42733 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T12:22:27,437 WARN [BP-688210996-172.17.0.2-1733574133374 heartbeating to localhost/127.0.0.1:42733 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-688210996-172.17.0.2-1733574133374 (Datanode Uuid cba2184d-b4b5-4a0d-9b40-8b5a5180d12f) service to localhost/127.0.0.1:42733 2024-12-07T12:22:27,437 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-07T12:22:27,437 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T12:22:27,439 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/cluster_2568f704-5c37-f461-1921-6023ff8e1bdd/data/data1/current/BP-688210996-172.17.0.2-1733574133374 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:22:27,439 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/cluster_2568f704-5c37-f461-1921-6023ff8e1bdd/data/data2/current/BP-688210996-172.17.0.2-1733574133374 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:22:27,439 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T12:22:27,450 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:22:27,453 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:22:27,454 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:22:27,454 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:22:27,454 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T12:22:27,454 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@760c54cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:22:27,455 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2026736d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:22:27,501 WARN [Thread-1362 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T12:22:27,503 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xccd52cfa418ce255 with lease ID 0xa7ce7d421ca39b22: from storage DS-83ed6f54-b3f8-4792-b9f6-eeb7a26393c1 node DatanodeRegistration(127.0.0.1:38269, datanodeUuid=3b1a354f-4214-4b38-84a9-fe4ba2597d49, infoPort=33435, infoSecurePort=0, ipcPort=43011, storageInfo=lv=-57;cid=testClusterID;nsid=349665632;c=1733574133374), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:22:27,504 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xccd52cfa418ce255 with lease ID 0xa7ce7d421ca39b22: from storage DS-901701bd-d3d0-4ba2-95c7-1e557bda66c2 node DatanodeRegistration(127.0.0.1:38269, datanodeUuid=3b1a354f-4214-4b38-84a9-fe4ba2597d49, infoPort=33435, infoSecurePort=0, ipcPort=43011, storageInfo=lv=-57;cid=testClusterID;nsid=349665632;c=1733574133374), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T12:22:27,573 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@528eeea6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/java.io.tmpdir/jetty-localhost-34467-hadoop-hdfs-3_4_1-tests_jar-_-any-14468048480030974268/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:22:27,573 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20466b6{HTTP/1.1, (http/1.1)}{localhost:34467} 2024-12-07T12:22:27,574 INFO [Time-limited test {}] server.Server(415): Started @164991ms 2024-12-07T12:22:27,575 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-07T12:22:27,663 WARN [Thread-1393 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T12:22:27,666 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x42b653042cc2d9a3 with lease ID 0xa7ce7d421ca39b23: from storage DS-a35f727f-c8b9-4651-ba3c-f3286805dda7 node DatanodeRegistration(127.0.0.1:35197, datanodeUuid=cba2184d-b4b5-4a0d-9b40-8b5a5180d12f, infoPort=46097, infoSecurePort=0, ipcPort=43655, storageInfo=lv=-57;cid=testClusterID;nsid=349665632;c=1733574133374), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:22:27,666 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x42b653042cc2d9a3 with lease ID 0xa7ce7d421ca39b23: from storage DS-7838bdac-74b2-417c-92f9-c7f09d4fe723 node DatanodeRegistration(127.0.0.1:35197, datanodeUuid=cba2184d-b4b5-4a0d-9b40-8b5a5180d12f, infoPort=46097, infoSecurePort=0, ipcPort=43655, storageInfo=lv=-57;cid=testClusterID;nsid=349665632;c=1733574133374), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:22:28,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:22:28,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:28,593 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-12-07T12:22:28,596 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-07T12:22:28,597 ERROR [FSHLog-0-hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae-prefix:27c6fcd7dac8,34911,1733574134231 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34651,DS-a35f727f-c8b9-4651-ba3c-f3286805dda7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:22:28,597 WARN [FSHLog-0-hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae-prefix:27c6fcd7dac8,34911,1733574134231 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34651,DS-a35f727f-c8b9-4651-ba3c-f3286805dda7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:28,597 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 27c6fcd7dac8%2C34911%2C1733574134231:(num 1733574134618) roll requested 2024-12-07T12:22:28,598 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C34911%2C1733574134231.1733574148597 2024-12-07T12:22:28,603 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574134618 newFile=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574148597 2024-12-07T12:22:28,604 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:28,604 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:28,604 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:28,604 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:28,604 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:28,604 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574134618 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574148597 2024-12-07T12:22:28,604 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34651,DS-a35f727f-c8b9-4651-ba3c-f3286805dda7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:22:28,605 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34651,DS-a35f727f-c8b9-4651-ba3c-f3286805dda7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:28,605 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574134618 2024-12-07T12:22:28,605 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46097:46097),(127.0.0.1/127.0.0.1:33435:33435)] 2024-12-07T12:22:28,605 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574134618 is not closed yet, will try archiving it next time 2024-12-07T12:22:28,605 WARN [IPC Server handler 3 on default port 42733 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574134618 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-12-07T12:22:28,605 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574134618 after 0ms 2024-12-07T12:22:29,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:29,174 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:22:30,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:30,174 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:30,608 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-12-07T12:22:31,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:22:31,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:32,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:32,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:22:32,503 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-07T12:22:32,606 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574134618 after 4001ms 2024-12-07T12:22:32,611 WARN [ResponseProcessor for block BP-688210996-172.17.0.2-1733574133374:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-688210996-172.17.0.2-1733574133374:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:32,612 WARN [DataStreamer for file /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574148597 block BP-688210996-172.17.0.2-1733574133374:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-688210996-172.17.0.2-1733574133374:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35197,DS-a35f727f-c8b9-4651-ba3c-f3286805dda7,DISK], DatanodeInfoWithStorage[127.0.0.1:38269,DS-83ed6f54-b3f8-4792-b9f6-eeb7a26393c1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35197,DS-a35f727f-c8b9-4651-ba3c-f3286805dda7,DISK]) is bad. 
2024-12-07T12:22:32,612 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_797968841_22 at /127.0.0.1:57082 [Receiving block BP-688210996-172.17.0.2-1733574133374:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:38269:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57082 dst: /127.0.0.1:38269 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:32,612 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_797968841_22 at /127.0.0.1:35168 [Receiving block BP-688210996-172.17.0.2-1733574133374:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35197:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35168 dst: /127.0.0.1:35197 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:32,614 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@528eeea6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:22:32,614 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20466b6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:22:32,614 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:22:32,614 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2026736d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:22:32,614 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@760c54cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/hadoop.log.dir/,STOPPED} 2024-12-07T12:22:32,615 WARN [BP-688210996-172.17.0.2-1733574133374 heartbeating to localhost/127.0.0.1:42733 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T12:22:32,615 WARN [BP-688210996-172.17.0.2-1733574133374 heartbeating to localhost/127.0.0.1:42733 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-688210996-172.17.0.2-1733574133374 (Datanode Uuid cba2184d-b4b5-4a0d-9b40-8b5a5180d12f) service to localhost/127.0.0.1:42733 2024-12-07T12:22:32,616 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/cluster_2568f704-5c37-f461-1921-6023ff8e1bdd/data/data1/current/BP-688210996-172.17.0.2-1733574133374 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:22:32,616 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T12:22:32,616 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T12:22:32,616 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/cluster_2568f704-5c37-f461-1921-6023ff8e1bdd/data/data2/current/BP-688210996-172.17.0.2-1733574133374 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:22:32,616 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T12:22:32,624 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:22:32,627 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:22:32,628 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:22:32,628 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:22:32,628 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T12:22:32,628 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24dbb8ea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:22:32,629 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7269a538{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:22:32,743 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6f2f2023{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/java.io.tmpdir/jetty-localhost-39171-hadoop-hdfs-3_4_1-tests_jar-_-any-359265380305642938/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:22:32,744 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4ae70be3{HTTP/1.1, (http/1.1)}{localhost:39171} 2024-12-07T12:22:32,744 INFO [Time-limited test {}] server.Server(415): Started @170162ms 2024-12-07T12:22:32,745 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T12:22:32,763 WARN [ResponseProcessor for block BP-688210996-172.17.0.2-1733574133374:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-688210996-172.17.0.2-1733574133374:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:32,764 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_797968841_22 at /127.0.0.1:39506 [Receiving block BP-688210996-172.17.0.2-1733574133374:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:38269:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39506 dst: /127.0.0.1:38269 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T12:22:32,770 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2ca8564b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:22:32,770 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@fa662a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:22:32,770 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:22:32,771 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@167fd01b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:22:32,771 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@718ea2f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/hadoop.log.dir/,STOPPED} 2024-12-07T12:22:32,772 WARN [BP-688210996-172.17.0.2-1733574133374 heartbeating to localhost/127.0.0.1:42733 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T12:22:32,772 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-07T12:22:32,772 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T12:22:32,772 WARN [BP-688210996-172.17.0.2-1733574133374 heartbeating to localhost/127.0.0.1:42733 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-688210996-172.17.0.2-1733574133374 (Datanode Uuid 3b1a354f-4214-4b38-84a9-fe4ba2597d49) service to localhost/127.0.0.1:42733 2024-12-07T12:22:32,773 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/cluster_2568f704-5c37-f461-1921-6023ff8e1bdd/data/data3/current/BP-688210996-172.17.0.2-1733574133374 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:22:32,773 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/cluster_2568f704-5c37-f461-1921-6023ff8e1bdd/data/data4/current/BP-688210996-172.17.0.2-1733574133374 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:22:32,773 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T12:22:32,782 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:22:32,786 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:22:32,786 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:22:32,786 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:22:32,786 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T12:22:32,788 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4e60361d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:22:32,788 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ddd02f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:22:32,834 WARN [Thread-1436 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T12:22:32,836 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5a80f9ffc9ea2f6c with lease ID 0xa7ce7d421ca39b24: from storage DS-a35f727f-c8b9-4651-ba3c-f3286805dda7 node DatanodeRegistration(127.0.0.1:34569, datanodeUuid=cba2184d-b4b5-4a0d-9b40-8b5a5180d12f, infoPort=39723, infoSecurePort=0, ipcPort=33883, storageInfo=lv=-57;cid=testClusterID;nsid=349665632;c=1733574133374), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:22:32,836 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5a80f9ffc9ea2f6c with lease ID 0xa7ce7d421ca39b24: from storage DS-7838bdac-74b2-417c-92f9-c7f09d4fe723 node DatanodeRegistration(127.0.0.1:34569, datanodeUuid=cba2184d-b4b5-4a0d-9b40-8b5a5180d12f, infoPort=39723, infoSecurePort=0, ipcPort=33883, storageInfo=lv=-57;cid=testClusterID;nsid=349665632;c=1733574133374), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:22:32,908 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@606a795b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/java.io.tmpdir/jetty-localhost-36501-hadoop-hdfs-3_4_1-tests_jar-_-any-5198313065235952408/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:22:32,909 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@78f99ac1{HTTP/1.1, (http/1.1)}{localhost:36501} 2024-12-07T12:22:32,909 INFO [Time-limited test {}] server.Server(415): Started @170327ms 2024-12-07T12:22:32,911 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-07T12:22:33,000 WARN [Thread-1467 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T12:22:33,003 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x719bc8f6c01d9753 with lease ID 0xa7ce7d421ca39b25: from storage DS-83ed6f54-b3f8-4792-b9f6-eeb7a26393c1 node DatanodeRegistration(127.0.0.1:36155, datanodeUuid=3b1a354f-4214-4b38-84a9-fe4ba2597d49, infoPort=45659, infoSecurePort=0, ipcPort=42285, storageInfo=lv=-57;cid=testClusterID;nsid=349665632;c=1733574133374), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:22:33,003 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x719bc8f6c01d9753 with lease ID 0xa7ce7d421ca39b25: from storage DS-901701bd-d3d0-4ba2-95c7-1e557bda66c2 node DatanodeRegistration(127.0.0.1:36155, datanodeUuid=3b1a354f-4214-4b38-84a9-fe4ba2597d49, infoPort=45659, infoSecurePort=0, ipcPort=42285, storageInfo=lv=-57;cid=testClusterID;nsid=349665632;c=1733574133374), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:22:33,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:22:33,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:33,930 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-12-07T12:22:33,932 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-12-07T12:22:33,933 ERROR [FSHLog-0-hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae-prefix:27c6fcd7dac8,34911,1733574134231 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38269,DS-83ed6f54-b3f8-4792-b9f6-eeb7a26393c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:22:33,933 WARN [FSHLog-0-hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae-prefix:27c6fcd7dac8,34911,1733574134231 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38269,DS-83ed6f54-b3f8-4792-b9f6-eeb7a26393c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:33,933 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 27c6fcd7dac8%2C34911%2C1733574134231:(num 1733574148597) roll requested 2024-12-07T12:22:33,933 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C34911%2C1733574134231.1733574153933 2024-12-07T12:22:33,939 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574148597 newFile=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574153933 2024-12-07T12:22:33,939 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:33,939 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:33,939 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:33,939 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:33,939 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:33,939 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574148597 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574153933 2024-12-07T12:22:33,939 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38269,DS-83ed6f54-b3f8-4792-b9f6-eeb7a26393c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:22:33,940 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38269,DS-83ed6f54-b3f8-4792-b9f6-eeb7a26393c1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:33,940 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574148597 2024-12-07T12:22:33,940 WARN [IPC Server handler 2 on default port 42733 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574148597 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-12-07T12:22:33,940 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45659:45659),(127.0.0.1/127.0.0.1:39723:39723)] 2024-12-07T12:22:33,940 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574148597 is not closed yet, will try archiving it next time 2024-12-07T12:22:33,940 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574148597 after 0ms 2024-12-07T12:22:34,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:34,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:22:35,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:35,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:35,941 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C34911%2C1733574134231.1733574155941 2024-12-07T12:22:35,947 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574153933 newFile=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574155941 2024-12-07T12:22:35,947 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:35,947 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:35,947 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:35,947 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:35,947 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:35,948 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574153933 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574155941 2024-12-07T12:22:35,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34569 is added to blk_1073741838_1019 (size=1264) 2024-12-07T12:22:35,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36155 is added to blk_1073741838_1019 (size=1264) 2024-12-07T12:22:35,950 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574148597 is not closed yet, will try archiving it next time 2024-12-07T12:22:35,951 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45659:45659),(127.0.0.1/127.0.0.1:39723:39723)] 2024-12-07T12:22:35,951 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): 
hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574148597 is not closed yet, will try archiving it next time 2024-12-07T12:22:35,951 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574134618 2024-12-07T12:22:35,951 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574134618 2024-12-07T12:22:35,951 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574134618 after 0ms 2024-12-07T12:22:35,952 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574134618 2024-12-07T12:22:35,960 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733574135518/Put/vlen=218/seqid=0] 2024-12-07T12:22:35,960 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1733574145254/Put/vlen=1045/seqid=0] 2024-12-07T12:22:35,960 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574134618 2024-12-07T12:22:35,960 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574148597 2024-12-07T12:22:35,960 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574148597 2024-12-07T12:22:35,961 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574148597 after 1ms 2024-12-07T12:22:35,961 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574148597 2024-12-07T12:22:35,963 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1733574148597/Put/vlen=1045/seqid=0] 2024-12-07T12:22:35,963 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1733574150609/Put/vlen=1045/seqid=0] 2024-12-07T12:22:35,964 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574148597 2024-12-07T12:22:35,964 DEBUG [Time-limited test {}] wal.TestLogRolling(403): 
recovering lease for hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574153933 2024-12-07T12:22:35,964 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574153933 2024-12-07T12:22:35,964 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574153933 after 0ms 2024-12-07T12:22:35,964 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574153933 2024-12-07T12:22:35,966 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1733574153933/Put/vlen=1045/seqid=0] 2024-12-07T12:22:35,967 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574155941 2024-12-07T12:22:35,967 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574155941 2024-12-07T12:22:35,967 WARN [IPC Server handler 4 on default port 42733 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574155941 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-12-07T12:22:35,967 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574155941 after 0ms 2024-12-07T12:22:36,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:36,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:36,840 WARN [ResponseProcessor for block BP-688210996-172.17.0.2-1733574133374:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-688210996-172.17.0.2-1733574133374:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:36,840 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-247609923_22 at /127.0.0.1:34762 [Receiving block BP-688210996-172.17.0.2-1733574133374:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:36155:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34762 dst: /127.0.0.1:36155 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:36155 remote=/127.0.0.1:34762]. Total timeout mills is 60000, 59107 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:36,840 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-247609923_22 at /127.0.0.1:37010 [Receiving block BP-688210996-172.17.0.2-1733574133374:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:34569:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37010 dst: /127.0.0.1:34569 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:22:36,840 WARN [DataStreamer for file /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574155941 block BP-688210996-172.17.0.2-1733574133374:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-688210996-172.17.0.2-1733574133374:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36155,DS-83ed6f54-b3f8-4792-b9f6-eeb7a26393c1,DISK], DatanodeInfoWithStorage[127.0.0.1:34569,DS-a35f727f-c8b9-4651-ba3c-f3286805dda7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36155,DS-83ed6f54-b3f8-4792-b9f6-eeb7a26393c1,DISK]) is bad. 
2024-12-07T12:22:36,841 WARN [DataStreamer for file /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574155941 block BP-688210996-172.17.0.2-1733574133374:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-688210996-172.17.0.2-1733574133374:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:22:36,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36155 is added to blk_1073741839_1022 (size=85) 2024-12-07T12:22:37,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:37,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:37,836 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-12-07T12:22:37,941 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574148597 after 4001ms 2024-12-07T12:22:38,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:38,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:39,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:39,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:22:39,968 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574155941 after 4001ms 2024-12-07T12:22:39,968 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574155941 2024-12-07T12:22:39,971 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574155941 2024-12-07T12:22:39,972 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing fd2dbe343c320aef54762bf37e94eca0 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-07T12:22:39,972 ERROR [FSHLog-0-hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae-prefix:27c6fcd7dac8,34911,1733574134231 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-688210996-172.17.0.2-1733574133374:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:39,972 WARN [FSHLog-0-hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae-prefix:27c6fcd7dac8,34911,1733574134231 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-688210996-172.17.0.2-1733574133374:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:39,973 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 27c6fcd7dac8%2C34911%2C1733574134231:(num 1733574155941) roll requested 2024-12-07T12:22:39,973 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C34911%2C1733574134231.1733574159973 2024-12-07T12:22:39,985 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574155941 newFile=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574159973 2024-12-07T12:22:39,985 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:39,985 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:39,985 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:39,985 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:39,986 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:39,986 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574155941 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574159973 2024-12-07T12:22:39,986 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-688210996-172.17.0.2-1733574133374:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:39,986 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-688210996-172.17.0.2-1733574133374:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:22:39,986 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574155941 2024-12-07T12:22:39,987 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574155941 after 1ms 2024-12-07T12:22:39,988 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.1733574155941 to hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/oldWALs/27c6fcd7dac8%2C34911%2C1733574134231.1733574155941 2024-12-07T12:22:39,988 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45659:45659),(127.0.0.1/127.0.0.1:39723:39723)] 2024-12-07T12:22:40,004 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/default/TestLogRolling-testLogRollOnPipelineRestart/fd2dbe343c320aef54762bf37e94eca0/.tmp/info/a64f707892ef4a5b83f8ed8b8a8c910d is 1080, key is row1002/info:/1733574145254/Put/seqid=0 2024-12-07T12:22:40,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36155 is added to blk_1073741841_1024 (size=9270) 2024-12-07T12:22:40,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34569 is added to blk_1073741841_1024 (size=9270) 2024-12-07T12:22:40,009 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/default/TestLogRolling-testLogRollOnPipelineRestart/fd2dbe343c320aef54762bf37e94eca0/.tmp/info/a64f707892ef4a5b83f8ed8b8a8c910d 2024-12-07T12:22:40,016 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/default/TestLogRolling-testLogRollOnPipelineRestart/fd2dbe343c320aef54762bf37e94eca0/.tmp/info/a64f707892ef4a5b83f8ed8b8a8c910d as hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/default/TestLogRolling-testLogRollOnPipelineRestart/fd2dbe343c320aef54762bf37e94eca0/info/a64f707892ef4a5b83f8ed8b8a8c910d 2024-12-07T12:22:40,021 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/default/TestLogRolling-testLogRollOnPipelineRestart/fd2dbe343c320aef54762bf37e94eca0/info/a64f707892ef4a5b83f8ed8b8a8c910d, entries=4, sequenceid=8, filesize=9.1 K 2024-12-07T12:22:40,022 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for fd2dbe343c320aef54762bf37e94eca0 in 51ms, sequenceid=8, compaction requested=false 2024-12-07T12:22:40,022 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 
fd2dbe343c320aef54762bf37e94eca0: 2024-12-07T12:22:40,022 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-12-07T12:22:40,022 ERROR [FSHLog-0-hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae-prefix:27c6fcd7dac8,34911,1733574134231.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34651,DS-a35f727f-c8b9-4651-ba3c-f3286805dda7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:40,022 WARN [FSHLog-0-hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae-prefix:27c6fcd7dac8,34911,1733574134231.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34651,DS-a35f727f-c8b9-4651-ba3c-f3286805dda7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:40,022 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 27c6fcd7dac8%2C34911%2C1733574134231.meta:.meta(num 1733574135018) roll requested 2024-12-07T12:22:40,023 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C34911%2C1733574134231.meta.1733574160023.meta 2024-12-07T12:22:40,027 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:40,027 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:40,028 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:40,028 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:40,028 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:40,028 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.meta.1733574135018.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.meta.1733574160023.meta 2024-12-07T12:22:40,028 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34651,DS-a35f727f-c8b9-4651-ba3c-f3286805dda7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:40,028 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34651,DS-a35f727f-c8b9-4651-ba3c-f3286805dda7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:40,028 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.meta.1733574135018.meta 2024-12-07T12:22:40,029 WARN [IPC Server handler 1 on default port 42733 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.meta.1733574135018.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1026 for block blk_1073741834_1013 2024-12-07T12:22:40,029 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.meta.1733574135018.meta after 1ms 2024-12-07T12:22:40,029 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45659:45659),(127.0.0.1/127.0.0.1:39723:39723)] 2024-12-07T12:22:40,029 DEBUG [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.meta.1733574135018.meta is not closed yet, will try archiving it next time 2024-12-07T12:22:40,044 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/hbase/meta/1588230740/.tmp/info/d5871477fdb1435cbc2f95cbf846d00f is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1733574135167.fd2dbe343c320aef54762bf37e94eca0./info:regioninfo/1733574135522/Put/seqid=0 2024-12-07T12:22:40,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34569 is added to blk_1073741843_1027 (size=7125) 2024-12-07T12:22:40,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36155 is added to blk_1073741843_1027 (size=7125) 2024-12-07T12:22:40,050 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/hbase/meta/1588230740/.tmp/info/d5871477fdb1435cbc2f95cbf846d00f 2024-12-07T12:22:40,070 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/hbase/meta/1588230740/.tmp/ns/02842e62d3f743adb87ddf33b0de58c8 is 43, key is default/ns:d/1733574135064/Put/seqid=0 2024-12-07T12:22:40,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36155 is added to blk_1073741844_1028 (size=5153) 2024-12-07T12:22:40,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34569 is added to blk_1073741844_1028 (size=5153) 2024-12-07T12:22:40,075 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/hbase/meta/1588230740/.tmp/ns/02842e62d3f743adb87ddf33b0de58c8 2024-12-07T12:22:40,094 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/hbase/meta/1588230740/.tmp/table/a701db5aba0e4b6691e58a71237f7c03 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1733574135532/Put/seqid=0 2024-12-07T12:22:40,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36155 is added to blk_1073741845_1029 (size=5438) 2024-12-07T12:22:40,099 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34569 is added to blk_1073741845_1029 (size=5438) 2024-12-07T12:22:40,100 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/hbase/meta/1588230740/.tmp/table/a701db5aba0e4b6691e58a71237f7c03 2024-12-07T12:22:40,106 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/hbase/meta/1588230740/.tmp/info/d5871477fdb1435cbc2f95cbf846d00f as hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/hbase/meta/1588230740/info/d5871477fdb1435cbc2f95cbf846d00f 2024-12-07T12:22:40,113 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/hbase/meta/1588230740/info/d5871477fdb1435cbc2f95cbf846d00f, entries=10, sequenceid=11, filesize=7.0 K 2024-12-07T12:22:40,114 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/hbase/meta/1588230740/.tmp/ns/02842e62d3f743adb87ddf33b0de58c8 as hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/hbase/meta/1588230740/ns/02842e62d3f743adb87ddf33b0de58c8 2024-12-07T12:22:40,120 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/hbase/meta/1588230740/ns/02842e62d3f743adb87ddf33b0de58c8, entries=2, sequenceid=11, filesize=5.0 K 2024-12-07T12:22:40,121 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/hbase/meta/1588230740/.tmp/table/a701db5aba0e4b6691e58a71237f7c03 as hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/hbase/meta/1588230740/table/a701db5aba0e4b6691e58a71237f7c03 2024-12-07T12:22:40,128 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/hbase/meta/1588230740/table/a701db5aba0e4b6691e58a71237f7c03, entries=2, sequenceid=11, filesize=5.3 K 2024-12-07T12:22:40,129 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 107ms, sequenceid=11, compaction requested=false 2024-12-07T12:22:40,129 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-07T12:22:40,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-07T12:22:40,136 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-07T12:22:40,136 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T12:22:40,136 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:22:40,136 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:22:40,136 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-07T12:22:40,136 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-07T12:22:40,136 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1600481109, stopped=false 2024-12-07T12:22:40,137 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=27c6fcd7dac8,36039,1733574134177 2024-12-07T12:22:40,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T12:22:40,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34911-0x1018cdff4200001, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T12:22:40,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:40,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34911-0x1018cdff4200001, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:40,138 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T12:22:40,138 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-07T12:22:40,138 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T12:22:40,139 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:22:40,139 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '27c6fcd7dac8,34911,1733574134231' ***** 2024-12-07T12:22:40,139 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T12:22:40,139 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34911-0x1018cdff4200001, quorum=127.0.0.1:56615, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:22:40,139 INFO [RS:0;27c6fcd7dac8:34911 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T12:22:40,139 INFO [RS:0;27c6fcd7dac8:34911 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T12:22:40,139 INFO [RS:0;27c6fcd7dac8:34911 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T12:22:40,139 INFO [RS:0;27c6fcd7dac8:34911 {}] regionserver.HRegionServer(3091): Received CLOSE for fd2dbe343c320aef54762bf37e94eca0 2024-12-07T12:22:40,139 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T12:22:40,140 INFO [RS:0;27c6fcd7dac8:34911 {}] regionserver.HRegionServer(959): stopping server 27c6fcd7dac8,34911,1733574134231 2024-12-07T12:22:40,140 INFO [RS:0;27c6fcd7dac8:34911 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T12:22:40,140 INFO [RS:0;27c6fcd7dac8:34911 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;27c6fcd7dac8:34911. 
2024-12-07T12:22:40,140 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:22:40,140 DEBUG [RS:0;27c6fcd7dac8:34911 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T12:22:40,140 DEBUG [RS:0;27c6fcd7dac8:34911 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:22:40,140 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing fd2dbe343c320aef54762bf37e94eca0, disabling compactions & flushes 2024-12-07T12:22:40,140 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733574135167.fd2dbe343c320aef54762bf37e94eca0. 2024-12-07T12:22:40,140 INFO [RS:0;27c6fcd7dac8:34911 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T12:22:40,140 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733574135167.fd2dbe343c320aef54762bf37e94eca0. 2024-12-07T12:22:40,140 INFO [RS:0;27c6fcd7dac8:34911 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T12:22:40,140 INFO [RS:0;27c6fcd7dac8:34911 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T12:22:40,140 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733574135167.fd2dbe343c320aef54762bf37e94eca0. after waiting 0 ms 2024-12-07T12:22:40,140 INFO [RS:0;27c6fcd7dac8:34911 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-07T12:22:40,140 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733574135167.fd2dbe343c320aef54762bf37e94eca0. 
2024-12-07T12:22:40,141 INFO [RS:0;27c6fcd7dac8:34911 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-07T12:22:40,141 DEBUG [RS:0;27c6fcd7dac8:34911 {}] regionserver.HRegionServer(1325): Online Regions={fd2dbe343c320aef54762bf37e94eca0=TestLogRolling-testLogRollOnPipelineRestart,,1733574135167.fd2dbe343c320aef54762bf37e94eca0., 1588230740=hbase:meta,,1.1588230740} 2024-12-07T12:22:40,141 DEBUG [RS:0;27c6fcd7dac8:34911 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, fd2dbe343c320aef54762bf37e94eca0 2024-12-07T12:22:40,141 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T12:22:40,141 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T12:22:40,141 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T12:22:40,141 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T12:22:40,141 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T12:22:40,149 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/default/TestLogRolling-testLogRollOnPipelineRestart/fd2dbe343c320aef54762bf37e94eca0/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-12-07T12:22:40,149 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-07T12:22:40,150 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733574135167.fd2dbe343c320aef54762bf37e94eca0. 
2024-12-07T12:22:40,150 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T12:22:40,150 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for fd2dbe343c320aef54762bf37e94eca0: Waiting for close lock at 1733574160140Running coprocessor pre-close hooks at 1733574160140Disabling compacts and flushes for region at 1733574160140Disabling writes for close at 1733574160140Writing region close event to WAL at 1733574160141 (+1 ms)Running coprocessor post-close hooks at 1733574160150 (+9 ms)Closed at 1733574160150 2024-12-07T12:22:40,150 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T12:22:40,150 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733574160141Running coprocessor pre-close hooks at 1733574160141Disabling compacts and flushes for region at 1733574160141Disabling writes for close at 1733574160141Writing region close event to WAL at 1733574160143 (+2 ms)Running coprocessor post-close hooks at 1733574160150 (+7 ms)Closed at 1733574160150 2024-12-07T12:22:40,150 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733574135167.fd2dbe343c320aef54762bf37e94eca0. 2024-12-07T12:22:40,150 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T12:22:40,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:40,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:40,341 INFO [RS:0;27c6fcd7dac8:34911 {}] regionserver.HRegionServer(976): stopping server 27c6fcd7dac8,34911,1733574134231; all regions closed. 
2024-12-07T12:22:40,342 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:40,342 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:40,342 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:40,342 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:40,342 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:40,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34569 is added to blk_1073741842_1025 (size=825) 2024-12-07T12:22:40,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36155 is added to blk_1073741842_1025 (size=825) 2024-12-07T12:22:40,485 INFO [regionserver/27c6fcd7dac8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T12:22:40,577 INFO [regionserver/27c6fcd7dac8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-07T12:22:40,577 INFO [regionserver/27c6fcd7dac8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-07T12:22:41,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:22:41,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:42,174 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:42,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:22:43,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:43,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:44,004 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-12-07T12:22:44,030 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.meta.1733574135018.meta after 4002ms 2024-12-07T12:22:44,030 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/WALs/27c6fcd7dac8,34911,1733574134231/27c6fcd7dac8%2C34911%2C1733574134231.meta.1733574135018.meta to hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/oldWALs/27c6fcd7dac8%2C34911%2C1733574134231.meta.1733574135018.meta 2024-12-07T12:22:44,033 DEBUG [RS:0;27c6fcd7dac8:34911 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/oldWALs 2024-12-07T12:22:44,033 INFO [RS:0;27c6fcd7dac8:34911 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 27c6fcd7dac8%2C34911%2C1733574134231.meta:.meta(num 1733574160023) 2024-12-07T12:22:44,034 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:44,034 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:44,034 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:44,034 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:44,034 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:44,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34569 is added to blk_1073741840_1023 (size=1162) 2024-12-07T12:22:44,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36155 is added to blk_1073741840_1023 (size=1162) 2024-12-07T12:22:44,040 DEBUG [RS:0;27c6fcd7dac8:34911 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/oldWALs 2024-12-07T12:22:44,040 INFO [RS:0;27c6fcd7dac8:34911 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 27c6fcd7dac8%2C34911%2C1733574134231:(num 1733574159973) 2024-12-07T12:22:44,040 DEBUG [RS:0;27c6fcd7dac8:34911 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:22:44,040 INFO [RS:0;27c6fcd7dac8:34911 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T12:22:44,040 INFO [RS:0;27c6fcd7dac8:34911 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T12:22:44,040 INFO [RS:0;27c6fcd7dac8:34911 {}] hbase.ChoreService(370): Chore service for: regionserver/27c6fcd7dac8:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T12:22:44,041 INFO [RS:0;27c6fcd7dac8:34911 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T12:22:44,041 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T12:22:44,041 INFO [RS:0;27c6fcd7dac8:34911 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34911 2024-12-07T12:22:44,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34911-0x1018cdff4200001, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/27c6fcd7dac8,34911,1733574134231 2024-12-07T12:22:44,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T12:22:44,043 INFO [RS:0;27c6fcd7dac8:34911 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T12:22:44,044 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [27c6fcd7dac8,34911,1733574134231] 2024-12-07T12:22:44,046 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/27c6fcd7dac8,34911,1733574134231 already deleted, retry=false 2024-12-07T12:22:44,046 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 27c6fcd7dac8,34911,1733574134231 expired; onlineServers=0 2024-12-07T12:22:44,047 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '27c6fcd7dac8,36039,1733574134177' ***** 2024-12-07T12:22:44,047 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-07T12:22:44,047 INFO [M:0;27c6fcd7dac8:36039 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T12:22:44,047 INFO [M:0;27c6fcd7dac8:36039 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T12:22:44,047 DEBUG [M:0;27c6fcd7dac8:36039 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-07T12:22:44,047 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-07T12:22:44,047 DEBUG [M:0;27c6fcd7dac8:36039 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-07T12:22:44,047 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.small.0-1733574134410 {}] cleaner.HFileCleaner(306): Exit Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.small.0-1733574134410,5,FailOnTimeoutGroup] 2024-12-07T12:22:44,047 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.large.0-1733574134408 {}] cleaner.HFileCleaner(306): Exit Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.large.0-1733574134408,5,FailOnTimeoutGroup] 2024-12-07T12:22:44,047 INFO [M:0;27c6fcd7dac8:36039 {}] hbase.ChoreService(370): Chore service for: master/27c6fcd7dac8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-07T12:22:44,047 INFO [M:0;27c6fcd7dac8:36039 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T12:22:44,047 DEBUG [M:0;27c6fcd7dac8:36039 {}] master.HMaster(1795): Stopping service threads 2024-12-07T12:22:44,047 INFO [M:0;27c6fcd7dac8:36039 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-07T12:22:44,047 INFO [M:0;27c6fcd7dac8:36039 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T12:22:44,048 INFO [M:0;27c6fcd7dac8:36039 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-07T12:22:44,048 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-07T12:22:44,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T12:22:44,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:44,048 DEBUG [M:0;27c6fcd7dac8:36039 {}] zookeeper.ZKUtil(347): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T12:22:44,048 WARN [M:0;27c6fcd7dac8:36039 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T12:22:44,049 INFO [M:0;27c6fcd7dac8:36039 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/.lastflushedseqids 2024-12-07T12:22:44,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36155 is added to blk_1073741846_1030 (size=130) 2024-12-07T12:22:44,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34569 is added to blk_1073741846_1030 (size=130) 2024-12-07T12:22:44,055 INFO [M:0;27c6fcd7dac8:36039 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-07T12:22:44,055 INFO [M:0;27c6fcd7dac8:36039 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-07T12:22:44,055 DEBUG [M:0;27c6fcd7dac8:36039 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T12:22:44,055 INFO [M:0;27c6fcd7dac8:36039 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:22:44,055 DEBUG [M:0;27c6fcd7dac8:36039 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:22:44,055 DEBUG [M:0;27c6fcd7dac8:36039 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T12:22:44,055 DEBUG [M:0;27c6fcd7dac8:36039 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:22:44,055 INFO [M:0;27c6fcd7dac8:36039 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-12-07T12:22:44,056 ERROR [FSHLog-0-hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData-prefix:27c6fcd7dac8,36039,1733574134177 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34651,DS-a35f727f-c8b9-4651-ba3c-f3286805dda7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:44,056 WARN [FSHLog-0-hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData-prefix:27c6fcd7dac8,36039,1733574134177 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34651,DS-a35f727f-c8b9-4651-ba3c-f3286805dda7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:22:44,056 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 27c6fcd7dac8%2C36039%2C1733574134177:(num 1733574134326) roll requested 2024-12-07T12:22:44,056 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C36039%2C1733574134177.1733574164056 2024-12-07T12:22:44,063 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:44,063 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:44,063 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:44,063 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:44,063 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:44,063 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/WALs/27c6fcd7dac8,36039,1733574134177/27c6fcd7dac8%2C36039%2C1733574134177.1733574134326 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/WALs/27c6fcd7dac8,36039,1733574134177/27c6fcd7dac8%2C36039%2C1733574134177.1733574164056 2024-12-07T12:22:44,064 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34651,DS-a35f727f-c8b9-4651-ba3c-f3286805dda7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T12:22:44,064 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34651,DS-a35f727f-c8b9-4651-ba3c-f3286805dda7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T12:22:44,064 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/WALs/27c6fcd7dac8,36039,1733574134177/27c6fcd7dac8%2C36039%2C1733574134177.1733574134326 2024-12-07T12:22:44,064 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39723:39723),(127.0.0.1/127.0.0.1:45659:45659)] 2024-12-07T12:22:44,064 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/WALs/27c6fcd7dac8,36039,1733574134177/27c6fcd7dac8%2C36039%2C1733574134177.1733574134326 is not closed yet, will try archiving it next time 2024-12-07T12:22:44,064 WARN [IPC Server handler 4 on default port 42733 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/WALs/27c6fcd7dac8,36039,1733574134177/27c6fcd7dac8%2C36039%2C1733574134177.1733574134326 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-12-07T12:22:44,064 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/WALs/27c6fcd7dac8,36039,1733574134177/27c6fcd7dac8%2C36039%2C1733574134177.1733574134326 after 0ms 2024-12-07T12:22:44,079 DEBUG [M:0;27c6fcd7dac8:36039 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3710a4ddc5eb42309178c9b4c74ec44c is 82, key is hbase:meta,,1/info:regioninfo/1733574135048/Put/seqid=0 2024-12-07T12:22:44,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34569 is added to blk_1073741848_1033 (size=5672) 2024-12-07T12:22:44,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36155 is added to blk_1073741848_1033 (size=5672) 2024-12-07T12:22:44,085 INFO [M:0;27c6fcd7dac8:36039 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3710a4ddc5eb42309178c9b4c74ec44c 2024-12-07T12:22:44,104 DEBUG [M:0;27c6fcd7dac8:36039 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a27c91e1a06344da8c0c2cdb2cc932cb is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733574135536/Put/seqid=0 2024-12-07T12:22:44,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34569 is added to blk_1073741849_1034 (size=6119) 2024-12-07T12:22:44,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36155 is added to blk_1073741849_1034 (size=6119) 2024-12-07T12:22:44,109 INFO [M:0;27c6fcd7dac8:36039 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a27c91e1a06344da8c0c2cdb2cc932cb 2024-12-07T12:22:44,128 DEBUG [M:0;27c6fcd7dac8:36039 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d4c218b33e1045c99207cc63a082b4c2 is 69, key is 27c6fcd7dac8,34911,1733574134231/rs:state/1733574134470/Put/seqid=0 2024-12-07T12:22:44,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34569 is added to blk_1073741850_1035 (size=5156) 2024-12-07T12:22:44,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36155 is added to blk_1073741850_1035 (size=5156) 2024-12-07T12:22:44,133 INFO [M:0;27c6fcd7dac8:36039 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d4c218b33e1045c99207cc63a082b4c2 2024-12-07T12:22:44,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34911-0x1018cdff4200001, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:22:44,144 INFO [RS:0;27c6fcd7dac8:34911 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T12:22:44,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34911-0x1018cdff4200001, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:22:44,144 INFO [RS:0;27c6fcd7dac8:34911 {}] regionserver.HRegionServer(1031): Exiting; stopping=27c6fcd7dac8,34911,1733574134231; zookeeper connection closed. 
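[Note] The flush records above write each store file under a .tmp directory, and the "Committing ... .tmp/<family>/<file> as .../<family>/<file>" records a few lines below move it into the column-family directory. A minimal sketch of that write-to-temp-then-rename pattern with the generic Hadoop FileSystem API follows; the paths and payload are hypothetical, and this is an illustration of the pattern the log shows rather than HBase's actual flush code.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenRenameSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Hypothetical store layout mirroring the .tmp/<family>/<file> -> <family>/<file> moves above.
    Path tmpFile = new Path("/store/.tmp/info/flush-output");
    Path committed = new Path("/store/info/flush-output");
    try (FSDataOutputStream out = fs.create(tmpFile)) {
      out.writeBytes("serialized cells would go here"); // placeholder payload
    }
    // Committing is a single rename, so readers never observe a half-written store file.
    if (!fs.rename(tmpFile, committed)) {
      throw new IOException("rename failed: " + tmpFile + " -> " + committed);
    }
  }
}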
2024-12-07T12:22:44,145 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@36f8c582 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@36f8c582 2024-12-07T12:22:44,145 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-07T12:22:44,152 DEBUG [M:0;27c6fcd7dac8:36039 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/71da274c73954c70baecd84853486214 is 52, key is load_balancer_on/state:d/1733574135163/Put/seqid=0 2024-12-07T12:22:44,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36155 is added to blk_1073741851_1036 (size=5056) 2024-12-07T12:22:44,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34569 is added to blk_1073741851_1036 (size=5056) 2024-12-07T12:22:44,157 INFO [M:0;27c6fcd7dac8:36039 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/71da274c73954c70baecd84853486214 2024-12-07T12:22:44,158 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T12:22:44,162 DEBUG [M:0;27c6fcd7dac8:36039 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3710a4ddc5eb42309178c9b4c74ec44c as hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3710a4ddc5eb42309178c9b4c74ec44c 2024-12-07T12:22:44,167 INFO [M:0;27c6fcd7dac8:36039 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3710a4ddc5eb42309178c9b4c74ec44c, entries=8, sequenceid=56, filesize=5.5 K 2024-12-07T12:22:44,168 DEBUG [M:0;27c6fcd7dac8:36039 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a27c91e1a06344da8c0c2cdb2cc932cb as hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a27c91e1a06344da8c0c2cdb2cc932cb 2024-12-07T12:22:44,172 INFO [M:0;27c6fcd7dac8:36039 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a27c91e1a06344da8c0c2cdb2cc932cb, entries=6, sequenceid=56, filesize=6.0 K 2024-12-07T12:22:44,173 DEBUG [M:0;27c6fcd7dac8:36039 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d4c218b33e1045c99207cc63a082b4c2 as hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d4c218b33e1045c99207cc63a082b4c2 2024-12-07T12:22:44,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:22:44,177 INFO [M:0;27c6fcd7dac8:36039 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d4c218b33e1045c99207cc63a082b4c2, entries=1, sequenceid=56, filesize=5.0 K 2024-12-07T12:22:44,178 DEBUG [M:0;27c6fcd7dac8:36039 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/71da274c73954c70baecd84853486214 as hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/71da274c73954c70baecd84853486214 2024-12-07T12:22:44,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:22:44,182 INFO [M:0;27c6fcd7dac8:36039 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/71da274c73954c70baecd84853486214, entries=1, sequenceid=56, filesize=4.9 K 2024-12-07T12:22:44,183 INFO [M:0;27c6fcd7dac8:36039 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=56, compaction requested=false 2024-12-07T12:22:44,185 INFO [M:0;27c6fcd7dac8:36039 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:22:44,185 DEBUG [M:0;27c6fcd7dac8:36039 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733574164055Disabling compacts and flushes for region at 1733574164055Disabling writes for close at 1733574164055Obtaining lock to block concurrent updates at 1733574164055Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733574164055Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1733574164056 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733574164065 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733574164065Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733574164079 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733574164079Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733574164090 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733574164103 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733574164103Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733574164113 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733574164127 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733574164127Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733574164137 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733574164151 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733574164151Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4084b2e: reopening flushed file at 1733574164162 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1d0a2cfa: reopening flushed file at 1733574164167 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5d5c587b: reopening flushed file at 1733574164172 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2595854f: reopening flushed file at 1733574164177 (+5 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=56, compaction requested=false at 1733574164183 (+6 ms)Writing region close event to WAL at 1733574164185 (+2 ms)Closed at 1733574164185 2024-12-07T12:22:44,186 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:44,186 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:44,186 INFO [sync.2 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:44,186 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:44,186 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:22:44,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36155 is added to blk_1073741847_1031 (size=757) 2024-12-07T12:22:44,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34569 is added to blk_1073741847_1031 (size=757) 2024-12-07T12:22:45,150 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,151 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,164 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,164 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,164 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,164 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,164 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,165 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,167 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,167 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,168 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,170 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,173 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,173 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:45,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:45,675 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-07T12:22:45,677 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,677 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,678 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,678 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,693 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,693 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,693 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,694 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,694 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,694 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,697 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:45,700 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:46,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:46,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:47,006 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-12-07T12:22:47,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:47,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:48,065 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/WALs/27c6fcd7dac8,36039,1733574134177/27c6fcd7dac8%2C36039%2C1733574134177.1733574134326 after 4001ms 2024-12-07T12:22:48,066 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/WALs/27c6fcd7dac8,36039,1733574134177/27c6fcd7dac8%2C36039%2C1733574134177.1733574134326 to hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/oldWALs/27c6fcd7dac8%2C36039%2C1733574134177.1733574134326 2024-12-07T12:22:48,068 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/MasterData/oldWALs/27c6fcd7dac8%2C36039%2C1733574134177.1733574134326 to hdfs://localhost:42733/user/jenkins/test-data/e4b377ba-78bd-c2c4-de7b-2743923accae/oldWALs/27c6fcd7dac8%2C36039%2C1733574134177.1733574134326$masterlocalwal$ 2024-12-07T12:22:48,068 INFO [M:0;27c6fcd7dac8:36039 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-07T12:22:48,068 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T12:22:48,069 INFO [M:0;27c6fcd7dac8:36039 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36039 2024-12-07T12:22:48,069 INFO [M:0;27c6fcd7dac8:36039 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T12:22:48,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:22:48,171 INFO [M:0;27c6fcd7dac8:36039 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T12:22:48,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36039-0x1018cdff4200000, quorum=127.0.0.1:56615, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:22:48,173 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@606a795b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:22:48,173 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@78f99ac1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:22:48,173 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:22:48,173 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ddd02f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:22:48,173 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4e60361d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/hadoop.log.dir/,STOPPED} 2024-12-07T12:22:48,175 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
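[Note] The ZKWatcher records above ("Received ZooKeeper Event, type=None, state=Closed, path=null") are session-state notifications delivered to the client's watcher during shutdown rather than znode watches. A minimal sketch of a watcher that logs such events is shown here; the quorum address and session timeout are assumptions, and the final state=Closed notification on close matches what the lines above show for recent ZooKeeper clients.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class SessionStateWatcherSketch {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("Received ZooKeeper Event, type=" + event.getType()
            + ", state=" + event.getState() + ", path=" + event.getPath());
    // Quorum address and session timeout are assumptions for illustration.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, watcher);
    zk.close(); // recent clients deliver a final state notification, like the Closed events above
  }
}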
2024-12-07T12:22:48,175 WARN [BP-688210996-172.17.0.2-1733574133374 heartbeating to localhost/127.0.0.1:42733 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T12:22:48,175 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T12:22:48,175 WARN [BP-688210996-172.17.0.2-1733574133374 heartbeating to localhost/127.0.0.1:42733 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-688210996-172.17.0.2-1733574133374 (Datanode Uuid 3b1a354f-4214-4b38-84a9-fe4ba2597d49) service to localhost/127.0.0.1:42733 2024-12-07T12:22:48,175 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/cluster_2568f704-5c37-f461-1921-6023ff8e1bdd/data/data3/current/BP-688210996-172.17.0.2-1733574133374 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:22:48,176 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/cluster_2568f704-5c37-f461-1921-6023ff8e1bdd/data/data4/current/BP-688210996-172.17.0.2-1733574133374 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:22:48,176 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T12:22:48,178 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6f2f2023{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:22:48,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:48,178 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4ae70be3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:22:48,178 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:22:48,178 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7269a538{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:22:48,178 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24dbb8ea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/hadoop.log.dir/,STOPPED} 2024-12-07T12:22:48,179 WARN [BP-688210996-172.17.0.2-1733574133374 heartbeating to localhost/127.0.0.1:42733 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T12:22:48,179 WARN [BP-688210996-172.17.0.2-1733574133374 heartbeating to localhost/127.0.0.1:42733 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-688210996-172.17.0.2-1733574133374 (Datanode Uuid cba2184d-b4b5-4a0d-9b40-8b5a5180d12f) service to localhost/127.0.0.1:42733 2024-12-07T12:22:48,179 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T12:22:48,179 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T12:22:48,180 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/cluster_2568f704-5c37-f461-1921-6023ff8e1bdd/data/data1/current/BP-688210996-172.17.0.2-1733574133374 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:22:48,180 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/cluster_2568f704-5c37-f461-1921-6023ff8e1bdd/data/data2/current/BP-688210996-172.17.0.2-1733574133374 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:22:48,180 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T12:22:48,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:22:48,186 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@71718145{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T12:22:48,186 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@37ba1ac4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:22:48,186 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:22:48,186 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3bf7054a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:22:48,186 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45628471{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/hadoop.log.dir/,STOPPED} 2024-12-07T12:22:48,192 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-07T12:22:48,208 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-07T12:22:48,217 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=180 (was 155) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42733 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42733 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42733 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42733 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42733 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-30-2 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42733 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:42733 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42733 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=75 (was 105), ProcessCount=11 (was 11), AvailableMemoryMB=6213 (was 6367) 2024-12-07T12:22:48,225 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=180, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=75, ProcessCount=11, AvailableMemoryMB=6213 2024-12-07T12:22:48,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-07T12:22:48,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/hadoop.log.dir so I do NOT create it in target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31 2024-12-07T12:22:48,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16a266a5-8f42-be19-4239-6c1bd8b5ea31/hadoop.tmp.dir so I do NOT create it in target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31 2024-12-07T12:22:48,225 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/cluster_6c4e5697-e95b-9fc2-b7d7-1dd1d6005eaf, deleteOnExit=true 2024-12-07T12:22:48,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-07T12:22:48,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/test.cache.data in 
system properties and HBase conf 2024-12-07T12:22:48,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T12:22:48,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/hadoop.log.dir in system properties and HBase conf 2024-12-07T12:22:48,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T12:22:48,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T12:22:48,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-07T12:22:48,226 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-07T12:22:48,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T12:22:48,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T12:22:48,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T12:22:48,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T12:22:48,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T12:22:48,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 
2024-12-07T12:22:48,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T12:22:48,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T12:22:48,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T12:22:48,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/nfs.dump.dir in system properties and HBase conf 2024-12-07T12:22:48,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/java.io.tmpdir in system properties and HBase conf 2024-12-07T12:22:48,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T12:22:48,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T12:22:48,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T12:22:48,240 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 
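Note: the ResourceChecker and HBaseTestingUtil records above mark the boundary between two test methods — the thread/file-descriptor accounting for the previous test (the "Potentially hanging thread" dumps and the "Thread LEAK?" summary), then a fresh mini cluster (1 master, 1 region server, 2 data nodes, 1 ZooKeeper server) whose per-test directories are redirected into target/test-data. As a rough illustration of how a test typically brings such a cluster up and down with HBaseTestingUtil, here is a minimal sketch; the builder and lifecycle method names are assumptions based on the StartMiniClusterOption fields printed in the log, not code taken from this test.

```java
// Hypothetical setup/teardown sketch mirroring the
// StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}
// printed above. Builder and lifecycle method names are assumed from the
// HBaseTestingUtil API referenced in the log and may differ on this branch.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // one HMaster (bound to an ephemeral port)
        .numRegionServers(1)  // one MiniHBaseClusterRegionServer
        .numDataNodes(2)      // the two DataNodes whose block reports appear below
        .numZkServers(1)      // MiniZooKeeperCluster on a random client port
        .build();
    util.startMiniCluster(option);   // brings up DFS, ZooKeeper, master, region server
    try {
      // ... test body runs against the cluster here ...
    } finally {
      util.shutdownMiniCluster();    // teardown is what triggers the ResourceChecker
    }                                // thread/descriptor comparison seen above
  }
}
```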
2024-12-07T12:22:48,276 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T12:22:48,276 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-07T12:22:48,276 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-07T12:22:48,276 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-07T12:22:48,309 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:22:48,312 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:22:48,316 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:22:48,316 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:22:48,316 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T12:22:48,317 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:22:48,317 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fb33a9d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:22:48,318 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1af676f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:22:48,430 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@188d3e33{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/java.io.tmpdir/jetty-localhost-45695-hadoop-hdfs-3_4_1-tests_jar-_-any-4696687801097586194/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T12:22:48,431 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2aaa4790{HTTP/1.1, (http/1.1)}{localhost:45695} 2024-12-07T12:22:48,431 INFO [Time-limited test {}] server.Server(415): Started @185849ms 2024-12-07T12:22:48,444 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-07T12:22:48,501 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:22:48,504 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:22:48,505 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:22:48,505 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:22:48,505 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T12:22:48,506 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@507832d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:22:48,506 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@9b25e94{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:22:48,621 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5aa33ca4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/java.io.tmpdir/jetty-localhost-45229-hadoop-hdfs-3_4_1-tests_jar-_-any-10864191865815551636/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:22:48,621 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3be31a0b{HTTP/1.1, (http/1.1)}{localhost:45229} 2024-12-07T12:22:48,621 INFO [Time-limited test {}] server.Server(415): Started @186039ms 2024-12-07T12:22:48,623 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T12:22:48,652 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:22:48,654 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:22:48,655 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:22:48,655 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:22:48,655 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T12:22:48,655 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@311facd9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:22:48,656 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@555a4a92{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:22:48,723 WARN [Thread-1662 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/cluster_6c4e5697-e95b-9fc2-b7d7-1dd1d6005eaf/data/data2/current/BP-37627011-172.17.0.2-1733574168256/current, will proceed with Du for space computation calculation, 2024-12-07T12:22:48,723 WARN [Thread-1661 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/cluster_6c4e5697-e95b-9fc2-b7d7-1dd1d6005eaf/data/data1/current/BP-37627011-172.17.0.2-1733574168256/current, will proceed with Du for space computation calculation, 2024-12-07T12:22:48,744 WARN [Thread-1640 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T12:22:48,747 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x76b953a5647025e2 with lease ID 0x6322e75e297c3d31: Processing first storage report for DS-f9744601-cdd2-47b4-8dc8-c55fb073b563 from datanode DatanodeRegistration(127.0.0.1:35739, datanodeUuid=1e3cbf44-289e-460b-aa8a-7a07257f4aab, infoPort=44495, infoSecurePort=0, ipcPort=42227, storageInfo=lv=-57;cid=testClusterID;nsid=991408484;c=1733574168256) 2024-12-07T12:22:48,747 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x76b953a5647025e2 with lease ID 0x6322e75e297c3d31: from storage DS-f9744601-cdd2-47b4-8dc8-c55fb073b563 node DatanodeRegistration(127.0.0.1:35739, datanodeUuid=1e3cbf44-289e-460b-aa8a-7a07257f4aab, infoPort=44495, infoSecurePort=0, ipcPort=42227, storageInfo=lv=-57;cid=testClusterID;nsid=991408484;c=1733574168256), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:22:48,747 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x76b953a5647025e2 with lease ID 0x6322e75e297c3d31: Processing first storage report for DS-27f66494-f6f8-4e48-a521-83a0d1c13384 from datanode DatanodeRegistration(127.0.0.1:35739, datanodeUuid=1e3cbf44-289e-460b-aa8a-7a07257f4aab, infoPort=44495, infoSecurePort=0, ipcPort=42227, storageInfo=lv=-57;cid=testClusterID;nsid=991408484;c=1733574168256) 2024-12-07T12:22:48,747 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x76b953a5647025e2 with lease ID 0x6322e75e297c3d31: from storage DS-27f66494-f6f8-4e48-a521-83a0d1c13384 node DatanodeRegistration(127.0.0.1:35739, datanodeUuid=1e3cbf44-289e-460b-aa8a-7a07257f4aab, infoPort=44495, infoSecurePort=0, ipcPort=42227, storageInfo=lv=-57;cid=testClusterID;nsid=991408484;c=1733574168256), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:22:48,776 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1a3c6b7a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/java.io.tmpdir/jetty-localhost-46241-hadoop-hdfs-3_4_1-tests_jar-_-any-1768563925633464964/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:22:48,776 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1beefc80{HTTP/1.1, (http/1.1)}{localhost:46241} 2024-12-07T12:22:48,776 INFO [Time-limited test {}] server.Server(415): Started @186194ms 2024-12-07T12:22:48,778 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-07T12:22:48,882 WARN [Thread-1687 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/cluster_6c4e5697-e95b-9fc2-b7d7-1dd1d6005eaf/data/data3/current/BP-37627011-172.17.0.2-1733574168256/current, will proceed with Du for space computation calculation, 2024-12-07T12:22:48,882 WARN [Thread-1688 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/cluster_6c4e5697-e95b-9fc2-b7d7-1dd1d6005eaf/data/data4/current/BP-37627011-172.17.0.2-1733574168256/current, will proceed with Du for space computation calculation, 2024-12-07T12:22:48,899 WARN [Thread-1676 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T12:22:48,901 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf5663368bbaceacc with lease ID 0x6322e75e297c3d32: Processing first storage report for DS-3baa2e16-10fd-4846-b5c4-08f81bf81c78 from datanode DatanodeRegistration(127.0.0.1:46431, datanodeUuid=832863cd-9d1c-4c28-a7dd-cca5f36ef278, infoPort=38969, infoSecurePort=0, ipcPort=40767, storageInfo=lv=-57;cid=testClusterID;nsid=991408484;c=1733574168256) 2024-12-07T12:22:48,901 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf5663368bbaceacc with lease ID 0x6322e75e297c3d32: from storage DS-3baa2e16-10fd-4846-b5c4-08f81bf81c78 node DatanodeRegistration(127.0.0.1:46431, datanodeUuid=832863cd-9d1c-4c28-a7dd-cca5f36ef278, infoPort=38969, infoSecurePort=0, ipcPort=40767, storageInfo=lv=-57;cid=testClusterID;nsid=991408484;c=1733574168256), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:22:48,901 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf5663368bbaceacc with lease ID 0x6322e75e297c3d32: Processing first storage report for DS-ca783958-35c9-42d9-8d6c-7b5374b38057 from datanode DatanodeRegistration(127.0.0.1:46431, datanodeUuid=832863cd-9d1c-4c28-a7dd-cca5f36ef278, infoPort=38969, infoSecurePort=0, ipcPort=40767, storageInfo=lv=-57;cid=testClusterID;nsid=991408484;c=1733574168256) 2024-12-07T12:22:48,901 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf5663368bbaceacc with lease ID 0x6322e75e297c3d32: from storage DS-ca783958-35c9-42d9-8d6c-7b5374b38057 node DatanodeRegistration(127.0.0.1:46431, datanodeUuid=832863cd-9d1c-4c28-a7dd-cca5f36ef278, infoPort=38969, infoSecurePort=0, ipcPort=40767, storageInfo=lv=-57;cid=testClusterID;nsid=991408484;c=1733574168256), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:22:49,000 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31 2024-12-07T12:22:49,003 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/cluster_6c4e5697-e95b-9fc2-b7d7-1dd1d6005eaf/zookeeper_0, clientPort=59219, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/cluster_6c4e5697-e95b-9fc2-b7d7-1dd1d6005eaf/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/cluster_6c4e5697-e95b-9fc2-b7d7-1dd1d6005eaf/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-07T12:22:49,004 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59219 2024-12-07T12:22:49,004 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:22:49,005 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:22:49,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741825_1001 (size=7) 2024-12-07T12:22:49,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741825_1001 (size=7) 2024-12-07T12:22:49,014 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd with version=8 2024-12-07T12:22:49,014 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/hbase-staging 2024-12-07T12:22:49,017 INFO [Time-limited test {}] client.ConnectionUtils(128): master/27c6fcd7dac8:0 server-side Connection retries=45 2024-12-07T12:22:49,017 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:22:49,017 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T12:22:49,017 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T12:22:49,017 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:22:49,017 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T12:22:49,017 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-07T12:22:49,017 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T12:22:49,018 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41517 2024-12-07T12:22:49,020 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41517 connecting to ZooKeeper ensemble=127.0.0.1:59219 2024-12-07T12:22:49,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:415170x0, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T12:22:49,026 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41517-0x1018ce07c3a0000 connected 2024-12-07T12:22:49,040 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:22:49,041 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:22:49,043 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:22:49,043 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd, hbase.cluster.distributed=false 2024-12-07T12:22:49,045 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T12:22:49,045 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41517 2024-12-07T12:22:49,045 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41517 2024-12-07T12:22:49,045 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41517 2024-12-07T12:22:49,046 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41517 2024-12-07T12:22:49,046 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41517 2024-12-07T12:22:49,061 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/27c6fcd7dac8:0 server-side Connection retries=45 2024-12-07T12:22:49,061 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:22:49,061 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T12:22:49,061 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T12:22:49,061 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:22:49,061 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T12:22:49,061 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T12:22:49,062 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T12:22:49,062 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44009 2024-12-07T12:22:49,063 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44009 connecting to ZooKeeper ensemble=127.0.0.1:59219 2024-12-07T12:22:49,064 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:22:49,066 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:22:49,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:440090x0, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T12:22:49,069 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44009-0x1018ce07c3a0001 connected 2024-12-07T12:22:49,070 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44009-0x1018ce07c3a0001, quorum=127.0.0.1:59219, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:22:49,070 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T12:22:49,070 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T12:22:49,071 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44009-0x1018ce07c3a0001, quorum=127.0.0.1:59219, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T12:22:49,071 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44009-0x1018ce07c3a0001, quorum=127.0.0.1:59219, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T12:22:49,072 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44009 2024-12-07T12:22:49,072 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44009 2024-12-07T12:22:49,072 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44009 2024-12-07T12:22:49,073 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44009 2024-12-07T12:22:49,073 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44009 
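Note: the RpcExecutor records above show each call queue instantiated with handlerCount=3 and maxQueueLength=30. A small hedged sketch of the configuration knobs that usually drive those numbers follows; the property names and the "10 queued calls per handler" default are assumptions about this branch, not verified from this log.

```java
import org.apache.hadoop.conf.Configuration;

// Illustrative reading of the knobs behind the RpcExecutor lines above.
// handlerCount comes from the handler-count setting; maxQueueLength=30 is
// consistent with a default of 10 queued calls per handler. Both property
// names and defaults are assumptions for illustration only.
public class RpcQueueSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    int handlerCount = conf.getInt("hbase.regionserver.handler.count", 30);
    int maxQueueLength =
        conf.getInt("hbase.ipc.server.max.callqueue.length", handlerCount * 10);
    System.out.println("handlers=" + handlerCount + " maxQueueLength=" + maxQueueLength);
  }
}
```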
2024-12-07T12:22:49,084 DEBUG [M:0;27c6fcd7dac8:41517 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;27c6fcd7dac8:41517 2024-12-07T12:22:49,084 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/27c6fcd7dac8,41517,1733574169016 2024-12-07T12:22:49,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44009-0x1018ce07c3a0001, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:22:49,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:22:49,086 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/27c6fcd7dac8,41517,1733574169016 2024-12-07T12:22:49,089 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44009-0x1018ce07c3a0001, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T12:22:49,089 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:49,089 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44009-0x1018ce07c3a0001, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:49,090 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T12:22:49,090 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/27c6fcd7dac8,41517,1733574169016 from backup master directory 2024-12-07T12:22:49,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/27c6fcd7dac8,41517,1733574169016 2024-12-07T12:22:49,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44009-0x1018ce07c3a0001, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:22:49,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:22:49,091 WARN [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
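Note: the records above show the would-be master first announcing itself under /hbase/backup-masters and then deleting that entry as it takes over (the "Registered as active master" record follows just below). A very rough sketch of that ephemeral-znode handoff using the plain ZooKeeper client API; this is not HBase's ActiveMasterManager, and the session handling, watches, and error paths are simplified assumptions.

```java
import java.nio.charset.StandardCharsets;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

// Simplified illustration of the backup-master -> active-master znode dance
// described in the log above. Assumes /hbase and /hbase/backup-masters exist.
public class ActiveMasterSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59219", 30000, event -> { });
    String server = "27c6fcd7dac8,41517,1733574169016";
    byte[] data = server.getBytes(StandardCharsets.UTF_8);

    // 1. Announce ourselves as a backup master (ephemeral, so it vanishes on crash).
    zk.create("/hbase/backup-masters/" + server, data,
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    // 2. Try to become the active master by creating /hbase/master.
    try {
      zk.create("/hbase/master", data,
          ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      // 3. Won the race: drop the backup-masters entry, as the log shows.
      zk.delete("/hbase/backup-masters/" + server, -1);
    } catch (KeeperException.NodeExistsException e) {
      // Another master is already active; stay registered as a backup.
    }
    zk.close();
  }
}
```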
2024-12-07T12:22:49,091 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=27c6fcd7dac8,41517,1733574169016 2024-12-07T12:22:49,094 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/hbase.id] with ID: 0957eba1-5fb2-4e8d-84b8-7032ccc0537e 2024-12-07T12:22:49,094 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/.tmp/hbase.id 2024-12-07T12:22:49,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741826_1002 (size=42) 2024-12-07T12:22:49,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741826_1002 (size=42) 2024-12-07T12:22:49,100 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/.tmp/hbase.id]:[hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/hbase.id] 2024-12-07T12:22:49,110 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:22:49,110 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-07T12:22:49,111 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
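Note: the FSUtils records above write the cluster ID to .tmp/hbase.id first and only then move it into place, so readers never observe a partially written hbase.id. A minimal sketch of that write-to-temp-then-rename pattern with the Hadoop FileSystem API follows; the paths are illustrative, and this is not the actual FSUtils implementation.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the "write to .tmp, then rename into place" pattern used for hbase.id
// in the FSUtils lines above. Paths are illustrative; the ID value is the one
// printed in the log.
public class ClusterIdSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path rootDir = new Path("/user/jenkins/test-data/example");
    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path target = new Path(rootDir, "hbase.id");

    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("0957eba1-5fb2-4e8d-84b8-7032ccc0537e".getBytes(StandardCharsets.UTF_8));
    }
    // The rename is what makes the ID file appear atomically at its final location.
    if (!fs.rename(tmp, target)) {
      throw new IOException("could not move " + tmp + " to " + target);
    }
  }
}
```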
2024-12-07T12:22:49,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:49,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44009-0x1018ce07c3a0001, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:49,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741827_1003 (size=196) 2024-12-07T12:22:49,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741827_1003 (size=196) 2024-12-07T12:22:49,122 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T12:22:49,123 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T12:22:49,124 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T12:22:49,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741828_1004 (size=1189) 2024-12-07T12:22:49,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741828_1004 (size=1189) 2024-12-07T12:22:49,135 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/data/master/store 2024-12-07T12:22:49,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741829_1005 (size=34) 2024-12-07T12:22:49,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741829_1005 (size=34) 2024-12-07T12:22:49,141 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:22:49,141 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T12:22:49,141 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:22:49,141 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:22:49,141 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T12:22:49,141 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:22:49,141 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
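Note: the MasterRegion records above print the full descriptor of the local 'master:store' table (families info, proc, rs, state). As a rough reconstruction of that descriptor with the public builder API — not the code MasterRegion actually runs — a sketch follows; only the 'info' and 'proc' families are shown, the 'rs' and 'state' families have the same shape as 'proc'.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Rough reconstruction of the 'master:store' descriptor printed above, using the
// public builder API. Attribute values mirror the logged descriptor.
public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                  // VERSIONS => '3'
            .setInMemory(true)                                  // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                             // BLOCKSIZE => 8KB
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)                                  // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)
            .build())
        .build();
  }

  public static void main(String[] args) {
    System.out.println(build());
  }
}
```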
2024-12-07T12:22:49,142 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733574169141Disabling compacts and flushes for region at 1733574169141Disabling writes for close at 1733574169141Writing region close event to WAL at 1733574169141Closed at 1733574169141 2024-12-07T12:22:49,142 WARN [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/data/master/store/.initializing 2024-12-07T12:22:49,142 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/WALs/27c6fcd7dac8,41517,1733574169016 2024-12-07T12:22:49,144 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=27c6fcd7dac8%2C41517%2C1733574169016, suffix=, logDir=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/WALs/27c6fcd7dac8,41517,1733574169016, archiveDir=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/oldWALs, maxLogs=10 2024-12-07T12:22:49,145 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C41517%2C1733574169016.1733574169145 2024-12-07T12:22:49,149 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/WALs/27c6fcd7dac8,41517,1733574169016/27c6fcd7dac8%2C41517%2C1733574169016.1733574169145 2024-12-07T12:22:49,150 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38969:38969),(127.0.0.1/127.0.0.1:44495:44495)] 2024-12-07T12:22:49,150 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:22:49,150 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:22:49,151 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:22:49,151 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:22:49,152 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:22:49,153 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T12:22:49,153 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:49,153 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:22:49,153 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:22:49,154 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T12:22:49,154 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:49,155 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:22:49,155 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:22:49,156 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T12:22:49,156 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:49,156 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:22:49,156 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:22:49,157 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T12:22:49,157 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:49,158 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:22:49,158 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:22:49,158 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:22:49,158 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:22:49,160 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:22:49,160 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:22:49,160 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T12:22:49,161 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:22:49,163 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:22:49,163 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=755558, jitterRate=-0.039258360862731934}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T12:22:49,164 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733574169151Initializing all the Stores at 1733574169151Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574169151Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574169152 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574169152Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574169152Cleaning up temporary data from old regions at 1733574169160 (+8 ms)Region opened successfully at 1733574169164 (+4 ms) 2024-12-07T12:22:49,166 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-07T12:22:49,168 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f983b9d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=27c6fcd7dac8/172.17.0.2:0 2024-12-07T12:22:49,169 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-07T12:22:49,170 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T12:22:49,170 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T12:22:49,170 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T12:22:49,170 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-07T12:22:49,170 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-07T12:22:49,170 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T12:22:49,172 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-07T12:22:49,173 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T12:22:49,174 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-07T12:22:49,174 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T12:22:49,175 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T12:22:49,177 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-07T12:22:49,177 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T12:22:49,178 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T12:22:49,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:49,179 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-07T12:22:49,180 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T12:22:49,181 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T12:22:49,182 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T12:22:49,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:49,183 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T12:22:49,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T12:22:49,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44009-0x1018ce07c3a0001, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T12:22:49,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:49,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44009-0x1018ce07c3a0001, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:49,187 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=27c6fcd7dac8,41517,1733574169016, sessionid=0x1018ce07c3a0000, setting cluster-up flag (Was=false) 2024-12-07T12:22:49,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44009-0x1018ce07c3a0001, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:49,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:49,194 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T12:22:49,194 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=27c6fcd7dac8,41517,1733574169016 2024-12-07T12:22:49,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44009-0x1018ce07c3a0001, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:49,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:49,201 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T12:22:49,202 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=27c6fcd7dac8,41517,1733574169016 2024-12-07T12:22:49,202 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-07T12:22:49,204 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-07T12:22:49,204 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-07T12:22:49,204 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
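[Editor's sketch, not part of the captured log] The StochasticLoadBalancer entry above reports maxSteps=1000000, stepsPerRegion=800 and maxRunningTime=30000. These limits are normally driven by configuration keys like the ones below; the key names are the commonly documented ones and should be treated as assumptions to verify against the HBase version in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // Values chosen to match the numbers printed in the log entry above.
    conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
    conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000); // milliseconds
    conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
    return conf;
  }
}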
2024-12-07T12:22:49,205 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 27c6fcd7dac8,41517,1733574169016 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T12:22:49,206 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:22:49,206 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:22:49,206 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:22:49,206 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:22:49,206 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/27c6fcd7dac8:0, corePoolSize=10, maxPoolSize=10 2024-12-07T12:22:49,206 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:22:49,206 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=2, maxPoolSize=2 2024-12-07T12:22:49,206 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:22:49,207 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733574199206 2024-12-07T12:22:49,207 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-07T12:22:49,207 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T12:22:49,207 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T12:22:49,207 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T12:22:49,207 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T12:22:49,207 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T12:22:49,207 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:49,207 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T12:22:49,208 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-07T12:22:49,208 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T12:22:49,208 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T12:22:49,208 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T12:22:49,208 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T12:22:49,208 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T12:22:49,208 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.large.0-1733574169208,5,FailOnTimeoutGroup] 2024-12-07T12:22:49,209 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.small.0-1733574169208,5,FailOnTimeoutGroup] 2024-12-07T12:22:49,209 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:49,209 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:49,209 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-07T12:22:49,209 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:49,209 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
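[Editor's sketch, not part of the captured log] The LogsCleaner and HFileCleaner chores scheduled above (period=600000 ms) run pluggable cleaner delegates such as TimeToLiveLogCleaner and TimeToLiveHFileCleaner. The keys below are the usual ones for tuning them; exact names and defaults are assumptions to check against the running version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CleanerConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.master.cleaner.interval", 600_000);   // chore period in ms, as logged above
    conf.setLong("hbase.master.logcleaner.ttl", 600_000L);   // how long old WALs are retained
    conf.set("hbase.master.hfilecleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
    return conf;
  }
}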
2024-12-07T12:22:49,209 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T12:22:49,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741831_1007 (size=1321) 2024-12-07T12:22:49,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741831_1007 (size=1321) 2024-12-07T12:22:49,218 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-07T12:22:49,218 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd 2024-12-07T12:22:49,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741832_1008 (size=32) 2024-12-07T12:22:49,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741832_1008 (size=32) 2024-12-07T12:22:49,224 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:22:49,225 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T12:22:49,226 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T12:22:49,226 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:49,227 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:22:49,227 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T12:22:49,228 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T12:22:49,228 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:49,228 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:22:49,228 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T12:22:49,229 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T12:22:49,229 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:49,230 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:22:49,230 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T12:22:49,231 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T12:22:49,231 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:49,231 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:22:49,231 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T12:22:49,232 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/hbase/meta/1588230740 2024-12-07T12:22:49,232 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/hbase/meta/1588230740 2024-12-07T12:22:49,233 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T12:22:49,233 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T12:22:49,234 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T12:22:49,235 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T12:22:49,237 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:22:49,237 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=807130, jitterRate=0.026319533586502075}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T12:22:49,238 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733574169224Initializing all the Stores at 1733574169225 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574169225Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574169225Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574169225Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE 
=> 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574169225Cleaning up temporary data from old regions at 1733574169233 (+8 ms)Region opened successfully at 1733574169238 (+5 ms) 2024-12-07T12:22:49,238 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T12:22:49,238 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T12:22:49,238 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T12:22:49,238 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T12:22:49,238 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T12:22:49,238 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T12:22:49,238 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733574169238Disabling compacts and flushes for region at 1733574169238Disabling writes for close at 1733574169238Writing region close event to WAL at 1733574169238Closed at 1733574169238 2024-12-07T12:22:49,240 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T12:22:49,240 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-07T12:22:49,240 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T12:22:49,241 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T12:22:49,242 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T12:22:49,275 INFO [RS:0;27c6fcd7dac8:44009 {}] regionserver.HRegionServer(746): ClusterId : 0957eba1-5fb2-4e8d-84b8-7032ccc0537e 2024-12-07T12:22:49,275 DEBUG [RS:0;27c6fcd7dac8:44009 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T12:22:49,277 DEBUG [RS:0;27c6fcd7dac8:44009 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T12:22:49,277 DEBUG [RS:0;27c6fcd7dac8:44009 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T12:22:49,280 DEBUG [RS:0;27c6fcd7dac8:44009 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T12:22:49,280 DEBUG [RS:0;27c6fcd7dac8:44009 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4da28fe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=27c6fcd7dac8/172.17.0.2:0 2024-12-07T12:22:49,292 DEBUG [RS:0;27c6fcd7dac8:44009 {}] 
regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;27c6fcd7dac8:44009 2024-12-07T12:22:49,292 INFO [RS:0;27c6fcd7dac8:44009 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T12:22:49,292 INFO [RS:0;27c6fcd7dac8:44009 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T12:22:49,292 DEBUG [RS:0;27c6fcd7dac8:44009 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-07T12:22:49,292 INFO [RS:0;27c6fcd7dac8:44009 {}] regionserver.HRegionServer(2659): reportForDuty to master=27c6fcd7dac8,41517,1733574169016 with port=44009, startcode=1733574169061 2024-12-07T12:22:49,293 DEBUG [RS:0;27c6fcd7dac8:44009 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T12:22:49,294 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57981, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T12:22:49,295 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41517 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 27c6fcd7dac8,44009,1733574169061 2024-12-07T12:22:49,295 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41517 {}] master.ServerManager(517): Registering regionserver=27c6fcd7dac8,44009,1733574169061 2024-12-07T12:22:49,297 DEBUG [RS:0;27c6fcd7dac8:44009 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd 2024-12-07T12:22:49,297 DEBUG [RS:0;27c6fcd7dac8:44009 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45559 2024-12-07T12:22:49,297 DEBUG [RS:0;27c6fcd7dac8:44009 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T12:22:49,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T12:22:49,299 DEBUG [RS:0;27c6fcd7dac8:44009 {}] zookeeper.ZKUtil(111): regionserver:44009-0x1018ce07c3a0001, quorum=127.0.0.1:59219, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/27c6fcd7dac8,44009,1733574169061 2024-12-07T12:22:49,299 WARN [RS:0;27c6fcd7dac8:44009 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-07T12:22:49,299 INFO [RS:0;27c6fcd7dac8:44009 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T12:22:49,299 DEBUG [RS:0;27c6fcd7dac8:44009 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/WALs/27c6fcd7dac8,44009,1733574169061 2024-12-07T12:22:49,299 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [27c6fcd7dac8,44009,1733574169061] 2024-12-07T12:22:49,302 INFO [RS:0;27c6fcd7dac8:44009 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T12:22:49,303 INFO [RS:0;27c6fcd7dac8:44009 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T12:22:49,304 INFO [RS:0;27c6fcd7dac8:44009 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T12:22:49,304 INFO [RS:0;27c6fcd7dac8:44009 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:49,304 INFO [RS:0;27c6fcd7dac8:44009 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T12:22:49,305 INFO [RS:0;27c6fcd7dac8:44009 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T12:22:49,305 INFO [RS:0;27c6fcd7dac8:44009 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:49,305 DEBUG [RS:0;27c6fcd7dac8:44009 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:22:49,305 DEBUG [RS:0;27c6fcd7dac8:44009 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:22:49,305 DEBUG [RS:0;27c6fcd7dac8:44009 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:22:49,305 DEBUG [RS:0;27c6fcd7dac8:44009 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:22:49,305 DEBUG [RS:0;27c6fcd7dac8:44009 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:22:49,305 DEBUG [RS:0;27c6fcd7dac8:44009 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/27c6fcd7dac8:0, corePoolSize=2, maxPoolSize=2 2024-12-07T12:22:49,305 DEBUG [RS:0;27c6fcd7dac8:44009 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:22:49,305 DEBUG [RS:0;27c6fcd7dac8:44009 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:22:49,305 DEBUG [RS:0;27c6fcd7dac8:44009 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/27c6fcd7dac8:0, corePoolSize=1, 
maxPoolSize=1 2024-12-07T12:22:49,305 DEBUG [RS:0;27c6fcd7dac8:44009 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:22:49,305 DEBUG [RS:0;27c6fcd7dac8:44009 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:22:49,305 DEBUG [RS:0;27c6fcd7dac8:44009 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:22:49,305 DEBUG [RS:0;27c6fcd7dac8:44009 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/27c6fcd7dac8:0, corePoolSize=3, maxPoolSize=3 2024-12-07T12:22:49,305 DEBUG [RS:0;27c6fcd7dac8:44009 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0, corePoolSize=3, maxPoolSize=3 2024-12-07T12:22:49,306 INFO [RS:0;27c6fcd7dac8:44009 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:49,306 INFO [RS:0;27c6fcd7dac8:44009 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:49,307 INFO [RS:0;27c6fcd7dac8:44009 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:49,307 INFO [RS:0;27c6fcd7dac8:44009 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:49,307 INFO [RS:0;27c6fcd7dac8:44009 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:49,307 INFO [RS:0;27c6fcd7dac8:44009 {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,44009,1733574169061-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T12:22:49,321 INFO [RS:0;27c6fcd7dac8:44009 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T12:22:49,321 INFO [RS:0;27c6fcd7dac8:44009 {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,44009,1733574169061-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:49,322 INFO [RS:0;27c6fcd7dac8:44009 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:49,322 INFO [RS:0;27c6fcd7dac8:44009 {}] regionserver.Replication(171): 27c6fcd7dac8,44009,1733574169061 started 2024-12-07T12:22:49,336 INFO [RS:0;27c6fcd7dac8:44009 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T12:22:49,336 INFO [RS:0;27c6fcd7dac8:44009 {}] regionserver.HRegionServer(1482): Serving as 27c6fcd7dac8,44009,1733574169061, RpcServer on 27c6fcd7dac8/172.17.0.2:44009, sessionid=0x1018ce07c3a0001 2024-12-07T12:22:49,336 DEBUG [RS:0;27c6fcd7dac8:44009 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T12:22:49,336 DEBUG [RS:0;27c6fcd7dac8:44009 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 27c6fcd7dac8,44009,1733574169061 2024-12-07T12:22:49,336 DEBUG [RS:0;27c6fcd7dac8:44009 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '27c6fcd7dac8,44009,1733574169061' 2024-12-07T12:22:49,336 DEBUG [RS:0;27c6fcd7dac8:44009 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T12:22:49,336 DEBUG [RS:0;27c6fcd7dac8:44009 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T12:22:49,337 DEBUG [RS:0;27c6fcd7dac8:44009 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T12:22:49,337 DEBUG [RS:0;27c6fcd7dac8:44009 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T12:22:49,337 DEBUG [RS:0;27c6fcd7dac8:44009 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 27c6fcd7dac8,44009,1733574169061 2024-12-07T12:22:49,337 DEBUG [RS:0;27c6fcd7dac8:44009 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '27c6fcd7dac8,44009,1733574169061' 2024-12-07T12:22:49,337 DEBUG [RS:0;27c6fcd7dac8:44009 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T12:22:49,337 DEBUG [RS:0;27c6fcd7dac8:44009 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T12:22:49,338 DEBUG [RS:0;27c6fcd7dac8:44009 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T12:22:49,338 INFO [RS:0;27c6fcd7dac8:44009 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T12:22:49,338 INFO [RS:0;27c6fcd7dac8:44009 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T12:22:49,392 WARN [27c6fcd7dac8:41517 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-12-07T12:22:49,439 INFO [RS:0;27c6fcd7dac8:44009 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=27c6fcd7dac8%2C44009%2C1733574169061, suffix=, logDir=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/WALs/27c6fcd7dac8,44009,1733574169061, archiveDir=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/oldWALs, maxLogs=32 2024-12-07T12:22:49,440 INFO [RS:0;27c6fcd7dac8:44009 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C44009%2C1733574169061.1733574169440 2024-12-07T12:22:49,445 INFO [RS:0;27c6fcd7dac8:44009 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/WALs/27c6fcd7dac8,44009,1733574169061/27c6fcd7dac8%2C44009%2C1733574169061.1733574169440 2024-12-07T12:22:49,456 DEBUG [RS:0;27c6fcd7dac8:44009 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38969:38969),(127.0.0.1/127.0.0.1:44495:44495)] 2024-12-07T12:22:49,643 DEBUG [27c6fcd7dac8:41517 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-07T12:22:49,643 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=27c6fcd7dac8,44009,1733574169061 2024-12-07T12:22:49,644 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 27c6fcd7dac8,44009,1733574169061, state=OPENING 2024-12-07T12:22:49,646 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T12:22:49,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:49,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44009-0x1018ce07c3a0001, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:22:49,648 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T12:22:49,648 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=27c6fcd7dac8,44009,1733574169061}] 2024-12-07T12:22:49,648 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:22:49,648 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:22:49,801 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T12:22:49,802 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38571, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T12:22:49,806 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-07T12:22:49,806 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T12:22:49,808 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=27c6fcd7dac8%2C44009%2C1733574169061.meta, suffix=.meta, logDir=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/WALs/27c6fcd7dac8,44009,1733574169061, archiveDir=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/oldWALs, maxLogs=32 2024-12-07T12:22:49,808 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C44009%2C1733574169061.meta.1733574169808.meta 2024-12-07T12:22:49,813 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/WALs/27c6fcd7dac8,44009,1733574169061/27c6fcd7dac8%2C44009%2C1733574169061.meta.1733574169808.meta 2024-12-07T12:22:49,817 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38969:38969),(127.0.0.1/127.0.0.1:44495:44495)] 2024-12-07T12:22:49,820 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:22:49,821 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T12:22:49,821 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T12:22:49,821 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-07T12:22:49,821 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T12:22:49,821 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:22:49,821 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-07T12:22:49,821 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-07T12:22:49,822 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T12:22:49,823 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T12:22:49,823 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:49,823 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:22:49,823 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T12:22:49,824 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T12:22:49,824 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:49,824 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:22:49,825 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T12:22:49,825 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T12:22:49,825 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:49,825 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:22:49,826 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T12:22:49,826 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T12:22:49,826 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:49,827 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-07T12:22:49,827 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T12:22:49,827 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/hbase/meta/1588230740 2024-12-07T12:22:49,828 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/hbase/meta/1588230740 2024-12-07T12:22:49,829 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T12:22:49,829 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T12:22:49,830 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T12:22:49,831 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T12:22:49,832 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=881142, jitterRate=0.12043032050132751}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T12:22:49,832 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-07T12:22:49,832 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733574169821Writing region info on filesystem at 1733574169821Initializing all the Stores at 1733574169822 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574169822Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574169822Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574169822Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574169822Cleaning up temporary data from old regions at 1733574169829 (+7 ms)Running coprocessor post-open hooks at 1733574169832 (+3 ms)Region opened successfully at 1733574169832 2024-12-07T12:22:49,833 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733574169800 2024-12-07T12:22:49,835 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T12:22:49,835 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-07T12:22:49,836 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=27c6fcd7dac8,44009,1733574169061 2024-12-07T12:22:49,837 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 27c6fcd7dac8,44009,1733574169061, state=OPEN 2024-12-07T12:22:49,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44009-0x1018ce07c3a0001, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T12:22:49,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T12:22:49,842 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:22:49,842 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:22:49,842 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=27c6fcd7dac8,44009,1733574169061 2024-12-07T12:22:49,845 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-07T12:22:49,845 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=27c6fcd7dac8,44009,1733574169061 in 194 msec 2024-12-07T12:22:49,847 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-07T12:22:49,847 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 605 msec 2024-12-07T12:22:49,848 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T12:22:49,848 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-07T12:22:49,849 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T12:22:49,849 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=27c6fcd7dac8,44009,1733574169061, seqNum=-1] 2024-12-07T12:22:49,849 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T12:22:49,851 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42469, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T12:22:49,856 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 651 msec 2024-12-07T12:22:49,856 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733574169856, completionTime=-1 2024-12-07T12:22:49,856 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-07T12:22:49,857 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-07T12:22:49,858 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-07T12:22:49,858 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733574229858 2024-12-07T12:22:49,858 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733574289858 2024-12-07T12:22:49,858 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-07T12:22:49,859 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,41517,1733574169016-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:49,859 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,41517,1733574169016-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:49,859 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,41517,1733574169016-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:49,859 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-27c6fcd7dac8:41517, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T12:22:49,859 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:49,859 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-07T12:22:49,860 DEBUG [master/27c6fcd7dac8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-07T12:22:49,862 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.771sec 2024-12-07T12:22:49,862 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T12:22:49,862 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T12:22:49,862 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T12:22:49,862 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-07T12:22:49,862 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T12:22:49,862 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,41517,1733574169016-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T12:22:49,863 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,41517,1733574169016-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T12:22:49,865 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-07T12:22:49,865 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T12:22:49,865 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,41517,1733574169016-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T12:22:49,875 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68c49893, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T12:22:49,875 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 27c6fcd7dac8,41517,-1 for getting cluster id 2024-12-07T12:22:49,875 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-07T12:22:49,877 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0957eba1-5fb2-4e8d-84b8-7032ccc0537e' 2024-12-07T12:22:49,877 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-07T12:22:49,877 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0957eba1-5fb2-4e8d-84b8-7032ccc0537e" 2024-12-07T12:22:49,877 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@694d6bf3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T12:22:49,877 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [27c6fcd7dac8,41517,-1] 2024-12-07T12:22:49,877 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-07T12:22:49,878 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:22:49,879 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36186, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-07T12:22:49,880 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@380e5c32, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T12:22:49,880 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T12:22:49,881 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=27c6fcd7dac8,44009,1733574169061, seqNum=-1] 2024-12-07T12:22:49,881 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T12:22:49,882 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42818, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T12:22:49,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=27c6fcd7dac8,41517,1733574169016 2024-12-07T12:22:49,884 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:22:49,886 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-07T12:22:49,886 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-07T12:22:49,887 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 27c6fcd7dac8,41517,1733574169016 2024-12-07T12:22:49,887 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@38cc2812 2024-12-07T12:22:49,887 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T12:22:49,888 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36198, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T12:22:49,888 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-07T12:22:49,888 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-07T12:22:49,889 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T12:22:49,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-07T12:22:49,891 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T12:22:49,891 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:49,891 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-12-07T12:22:49,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T12:22:49,892 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T12:22:49,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741835_1011 (size=405) 2024-12-07T12:22:49,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741835_1011 (size=405) 2024-12-07T12:22:49,901 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 681d3197c39ab834080385eab4e4fc43, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd 2024-12-07T12:22:49,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741836_1012 (size=88) 2024-12-07T12:22:49,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741836_1012 (size=88) 2024-12-07T12:22:49,908 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:22:49,908 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 681d3197c39ab834080385eab4e4fc43, disabling compactions & flushes 2024-12-07T12:22:49,908 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43. 2024-12-07T12:22:49,908 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43. 2024-12-07T12:22:49,908 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43. after waiting 0 ms 2024-12-07T12:22:49,908 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43. 
2024-12-07T12:22:49,908 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43. 2024-12-07T12:22:49,908 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 681d3197c39ab834080385eab4e4fc43: Waiting for close lock at 1733574169908Disabling compacts and flushes for region at 1733574169908Disabling writes for close at 1733574169908Writing region close event to WAL at 1733574169908Closed at 1733574169908 2024-12-07T12:22:49,910 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T12:22:49,910 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1733574169910"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733574169910"}]},"ts":"1733574169910"} 2024-12-07T12:22:49,913 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-07T12:22:49,914 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T12:22:49,915 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733574169914"}]},"ts":"1733574169914"} 2024-12-07T12:22:49,917 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-12-07T12:22:49,917 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=681d3197c39ab834080385eab4e4fc43, ASSIGN}] 2024-12-07T12:22:49,918 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=681d3197c39ab834080385eab4e4fc43, ASSIGN 2024-12-07T12:22:49,919 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=681d3197c39ab834080385eab4e4fc43, ASSIGN; state=OFFLINE, location=27c6fcd7dac8,44009,1733574169061; forceNewPlan=false, retain=false 2024-12-07T12:22:50,070 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=681d3197c39ab834080385eab4e4fc43, regionState=OPENING, regionLocation=27c6fcd7dac8,44009,1733574169061 2024-12-07T12:22:50,072 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=681d3197c39ab834080385eab4e4fc43, ASSIGN because future has completed 2024-12-07T12:22:50,073 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 681d3197c39ab834080385eab4e4fc43, server=27c6fcd7dac8,44009,1733574169061}] 2024-12-07T12:22:50,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:50,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:50,229 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43. 
2024-12-07T12:22:50,229 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 681d3197c39ab834080385eab4e4fc43, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:22:50,230 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 681d3197c39ab834080385eab4e4fc43 2024-12-07T12:22:50,230 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:22:50,230 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 681d3197c39ab834080385eab4e4fc43 2024-12-07T12:22:50,230 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 681d3197c39ab834080385eab4e4fc43 2024-12-07T12:22:50,231 INFO [StoreOpener-681d3197c39ab834080385eab4e4fc43-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 681d3197c39ab834080385eab4e4fc43 2024-12-07T12:22:50,233 INFO [StoreOpener-681d3197c39ab834080385eab4e4fc43-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 681d3197c39ab834080385eab4e4fc43 columnFamilyName info 2024-12-07T12:22:50,233 DEBUG [StoreOpener-681d3197c39ab834080385eab4e4fc43-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:22:50,233 INFO [StoreOpener-681d3197c39ab834080385eab4e4fc43-1 {}] regionserver.HStore(327): Store=681d3197c39ab834080385eab4e4fc43/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:22:50,233 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 681d3197c39ab834080385eab4e4fc43 2024-12-07T12:22:50,234 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43 2024-12-07T12:22:50,234 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43 2024-12-07T12:22:50,235 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 681d3197c39ab834080385eab4e4fc43 2024-12-07T12:22:50,235 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 681d3197c39ab834080385eab4e4fc43 2024-12-07T12:22:50,236 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 681d3197c39ab834080385eab4e4fc43 2024-12-07T12:22:50,238 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:22:50,238 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 681d3197c39ab834080385eab4e4fc43; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=713827, jitterRate=-0.09232212603092194}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T12:22:50,238 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 681d3197c39ab834080385eab4e4fc43 2024-12-07T12:22:50,239 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 681d3197c39ab834080385eab4e4fc43: Running coprocessor pre-open hook at 1733574170230Writing region info on filesystem at 1733574170230Initializing all the Stores at 1733574170231 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574170231Cleaning up temporary data from old regions at 1733574170235 (+4 ms)Running coprocessor post-open hooks at 1733574170238 (+3 ms)Region opened successfully at 1733574170239 (+1 ms) 2024-12-07T12:22:50,240 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43., pid=6, masterSystemTime=1733574170225 2024-12-07T12:22:50,242 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43. 2024-12-07T12:22:50,242 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43. 2024-12-07T12:22:50,243 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=681d3197c39ab834080385eab4e4fc43, regionState=OPEN, openSeqNum=2, regionLocation=27c6fcd7dac8,44009,1733574169061 2024-12-07T12:22:50,246 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 681d3197c39ab834080385eab4e4fc43, server=27c6fcd7dac8,44009,1733574169061 because future has completed 2024-12-07T12:22:50,250 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-07T12:22:50,250 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 681d3197c39ab834080385eab4e4fc43, server=27c6fcd7dac8,44009,1733574169061 in 174 msec 2024-12-07T12:22:50,252 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-07T12:22:50,252 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=681d3197c39ab834080385eab4e4fc43, ASSIGN in 333 msec 2024-12-07T12:22:50,253 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T12:22:50,253 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733574170253"}]},"ts":"1733574170253"} 2024-12-07T12:22:50,256 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-12-07T12:22:50,257 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T12:22:50,260 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 368 msec 2024-12-07T12:22:51,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:51,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:52,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:52,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:53,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:53,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:53,778 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-07T12:22:53,779 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:53,779 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:53,780 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:53,780 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:53,780 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:53,780 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:53,796 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:53,797 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:53,797 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:53,797 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:53,797 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:53,798 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:53,801 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:53,801 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:53,801 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:53,804 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:22:54,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:54,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:55,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:55,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:55,302 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-07T12:22:55,303 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-12-07T12:22:56,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:56,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:57,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:57,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:58,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:58,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:58,276 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-07T12:22:58,276 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-07T12:22:58,277 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T12:22:58,277 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-07T12:22:58,277 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-07T12:22:58,277 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-07T12:22:58,278 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-07T12:22:58,278 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-12-07T12:22:59,184 WARN 
[Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:22:59,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-12-07T12:22:59,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-07T12:22:59,946 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-07T12:22:59,946 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-12-07T12:22:59,949 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-07T12:22:59,949 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43.
2024-12-07T12:22:59,952 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43., hostname=27c6fcd7dac8,44009,1733574169061, seqNum=2]
2024-12-07T12:22:59,959 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-07T12:22:59,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-07T12:22:59,965 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-07T12:22:59,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-12-07T12:22:59,966 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-07T12:22:59,968 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-07T12:23:00,128 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44009 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-12-07T12:23:00,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43.
2024-12-07T12:23:00,128 INFO [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 681d3197c39ab834080385eab4e4fc43 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-07T12:23:00,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/.tmp/info/bad512cbb11f487187af8f6ff59283ab is 1080, key is row0001/info:/1733574179954/Put/seqid=0 2024-12-07T12:23:00,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741837_1013 (size=6033) 2024-12-07T12:23:00,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741837_1013 (size=6033) 2024-12-07T12:23:00,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:23:00,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-07T12:23:00,551 INFO [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/.tmp/info/bad512cbb11f487187af8f6ff59283ab
2024-12-07T12:23:00,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/.tmp/info/bad512cbb11f487187af8f6ff59283ab as hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/info/bad512cbb11f487187af8f6ff59283ab
2024-12-07T12:23:00,564 INFO [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/info/bad512cbb11f487187af8f6ff59283ab, entries=1, sequenceid=5, filesize=5.9 K
2024-12-07T12:23:00,565 INFO [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 681d3197c39ab834080385eab4e4fc43 in 437ms, sequenceid=5, compaction requested=false
2024-12-07T12:23:00,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 681d3197c39ab834080385eab4e4fc43:
2024-12-07T12:23:00,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43.
2024-12-07T12:23:00,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8
2024-12-07T12:23:00,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] master.HMaster(4169): Remote procedure done, pid=8
2024-12-07T12:23:00,572 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-12-07T12:23:00,572 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 602 msec
2024-12-07T12:23:00,574 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 612 msec
2024-12-07T12:23:01,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-12-07T12:23:01,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:02,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:02,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:23:03,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:03,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:04,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:23:04,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:05,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:05,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:23:06,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:06,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:07,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:23:07,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:08,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:08,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:23:09,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:09,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-12-07T12:23:10,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-12-07T12:23:10,006 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-07T12:23:10,009 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-07T12:23:10,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-07T12:23:10,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-12-07T12:23:10,011 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-07T12:23:10,012 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-07T12:23:10,012 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-07T12:23:10,166 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44009 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-12-07T12:23:10,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43.
2024-12-07T12:23:10,166 INFO [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 681d3197c39ab834080385eab4e4fc43 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-07T12:23:10,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/.tmp/info/b3f2bc1e7c3e4f7b9c14da307874467d is 1080, key is row0002/info:/1733574190007/Put/seqid=0
2024-12-07T12:23:10,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741838_1014 (size=6033)
2024-12-07T12:23:10,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741838_1014 (size=6033)
2024-12-07T12:23:10,177 INFO [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/.tmp/info/b3f2bc1e7c3e4f7b9c14da307874467d
2024-12-07T12:23:10,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/.tmp/info/b3f2bc1e7c3e4f7b9c14da307874467d as hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/info/b3f2bc1e7c3e4f7b9c14da307874467d
2024-12-07T12:23:10,187 INFO [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/info/b3f2bc1e7c3e4f7b9c14da307874467d, entries=1, sequenceid=9, filesize=5.9 K
2024-12-07T12:23:10,189 INFO [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 681d3197c39ab834080385eab4e4fc43 in 22ms, sequenceid=9, compaction requested=false
2024-12-07T12:23:10,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 681d3197c39ab834080385eab4e4fc43:
2024-12-07T12:23:10,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43.
2024-12-07T12:23:10,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10
2024-12-07T12:23:10,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] master.HMaster(4169): Remote procedure done, pid=10
2024-12-07T12:23:10,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-12-07T12:23:10,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-12-07T12:23:10,193 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9
2024-12-07T12:23:10,193 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 179 msec
2024-12-07T12:23:10,195 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 185 msec
2024-12-07T12:23:11,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:11,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:23:12,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:12,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:13,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:23:13,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:14,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:14,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 after 68045ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T12:23:14,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:14,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta after 68031ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T12:23:15,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:23:15,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:16,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:16,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:23:17,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:17,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:18,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:23:18,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:19,000 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T12:23:19,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:19,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:20,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-12-07T12:23:20,076 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-07T12:23:20,079 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C44009%2C1733574169061.1733574200079 2024-12-07T12:23:20,085 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:20,085 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:20,085 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:20,085 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:20,085 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:20,085 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/WALs/27c6fcd7dac8,44009,1733574169061/27c6fcd7dac8%2C44009%2C1733574169061.1733574169440 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/WALs/27c6fcd7dac8,44009,1733574169061/27c6fcd7dac8%2C44009%2C1733574169061.1733574200079 2024-12-07T12:23:20,086 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38969:38969),(127.0.0.1/127.0.0.1:44495:44495)] 2024-12-07T12:23:20,086 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/WALs/27c6fcd7dac8,44009,1733574169061/27c6fcd7dac8%2C44009%2C1733574169061.1733574169440 is not closed yet, will try archiving it next time 2024-12-07T12:23:20,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741833_1009 (size=5546) 2024-12-07T12:23:20,087 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-07T12:23:20,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741833_1009 (size=5546) 2024-12-07T12:23:20,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-07T12:23:20,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-12-07T12:23:20,090 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-07T12:23:20,091 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T12:23:20,091 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T12:23:20,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:20,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:20,244 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44009 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-12-07T12:23:20,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43. 
2024-12-07T12:23:20,244 INFO [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 681d3197c39ab834080385eab4e4fc43 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-07T12:23:20,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/.tmp/info/2454201763e74d31ade7fdc56bebe96d is 1080, key is row0003/info:/1733574200078/Put/seqid=0 2024-12-07T12:23:20,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741840_1016 (size=6033) 2024-12-07T12:23:20,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741840_1016 (size=6033) 2024-12-07T12:23:20,254 INFO [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/.tmp/info/2454201763e74d31ade7fdc56bebe96d 2024-12-07T12:23:20,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/.tmp/info/2454201763e74d31ade7fdc56bebe96d as hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/info/2454201763e74d31ade7fdc56bebe96d 2024-12-07T12:23:20,264 INFO [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/info/2454201763e74d31ade7fdc56bebe96d, entries=1, sequenceid=13, filesize=5.9 K 2024-12-07T12:23:20,265 INFO [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 681d3197c39ab834080385eab4e4fc43 in 21ms, sequenceid=13, compaction requested=true 2024-12-07T12:23:20,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 681d3197c39ab834080385eab4e4fc43: 2024-12-07T12:23:20,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43. 
2024-12-07T12:23:20,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-12-07T12:23:20,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-12-07T12:23:20,269 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-12-07T12:23:20,269 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 176 msec 2024-12-07T12:23:20,272 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 183 msec 2024-12-07T12:23:21,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:23:21,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:22,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:22,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:23:23,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:23,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:24,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:23:24,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:25,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:25,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:23:26,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:26,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:27,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:23:27,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:28,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:28,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:23:29,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:29,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:30,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-12-07T12:23:30,136 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-07T12:23:30,136 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T12:23:30,138 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T12:23:30,138 DEBUG [Time-limited test {}] regionserver.HStore(1541): 681d3197c39ab834080385eab4e4fc43/info is initiating minor compaction (all files) 2024-12-07T12:23:30,138 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T12:23:30,138 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:23:30,138 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 681d3197c39ab834080385eab4e4fc43/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43. 
2024-12-07T12:23:30,138 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/info/bad512cbb11f487187af8f6ff59283ab, hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/info/b3f2bc1e7c3e4f7b9c14da307874467d, hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/info/2454201763e74d31ade7fdc56bebe96d] into tmpdir=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/.tmp, totalSize=17.7 K 2024-12-07T12:23:30,138 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting bad512cbb11f487187af8f6ff59283ab, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733574179954 2024-12-07T12:23:30,139 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting b3f2bc1e7c3e4f7b9c14da307874467d, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1733574190007 2024-12-07T12:23:30,139 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 2454201763e74d31ade7fdc56bebe96d, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733574200078 2024-12-07T12:23:30,150 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 681d3197c39ab834080385eab4e4fc43#info#compaction#47 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T12:23:30,150 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/.tmp/info/70dd20de4a5f457eab3ef33ef2e3c84b is 1080, key is row0001/info:/1733574179954/Put/seqid=0 2024-12-07T12:23:30,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741841_1017 (size=8296) 2024-12-07T12:23:30,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741841_1017 (size=8296) 2024-12-07T12:23:30,160 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/.tmp/info/70dd20de4a5f457eab3ef33ef2e3c84b as hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/info/70dd20de4a5f457eab3ef33ef2e3c84b 2024-12-07T12:23:30,166 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 681d3197c39ab834080385eab4e4fc43/info of 681d3197c39ab834080385eab4e4fc43 into 70dd20de4a5f457eab3ef33ef2e3c84b(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T12:23:30,166 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 681d3197c39ab834080385eab4e4fc43: 2024-12-07T12:23:30,168 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C44009%2C1733574169061.1733574210168 2024-12-07T12:23:30,173 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:30,173 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:30,173 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:30,173 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:30,173 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:30,173 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/WALs/27c6fcd7dac8,44009,1733574169061/27c6fcd7dac8%2C44009%2C1733574169061.1733574200079 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/WALs/27c6fcd7dac8,44009,1733574169061/27c6fcd7dac8%2C44009%2C1733574169061.1733574210168 2024-12-07T12:23:30,175 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38969:38969),(127.0.0.1/127.0.0.1:44495:44495)] 2024-12-07T12:23:30,175 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/WALs/27c6fcd7dac8,44009,1733574169061/27c6fcd7dac8%2C44009%2C1733574169061.1733574200079 is not closed yet, will try archiving it next time 2024-12-07T12:23:30,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741839_1015 (size=2520) 2024-12-07T12:23:30,175 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741839_1015 (size=2520) 2024-12-07T12:23:30,175 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/WALs/27c6fcd7dac8,44009,1733574169061/27c6fcd7dac8%2C44009%2C1733574169061.1733574169440 to hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/oldWALs/27c6fcd7dac8%2C44009%2C1733574169061.1733574169440 2024-12-07T12:23:30,176 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-07T12:23:30,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-07T12:23:30,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-07T12:23:30,178 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-07T12:23:30,179 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T12:23:30,179 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T12:23:30,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:30,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:23:30,333 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44009 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-12-07T12:23:30,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43. 2024-12-07T12:23:30,333 INFO [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 681d3197c39ab834080385eab4e4fc43 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-07T12:23:30,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/.tmp/info/b7b3513d864b4aecab679a7a0f1147ce is 1080, key is row0000/info:/1733574210167/Put/seqid=0 2024-12-07T12:23:30,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741843_1019 (size=6033) 2024-12-07T12:23:30,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741843_1019 (size=6033) 2024-12-07T12:23:30,343 INFO [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/.tmp/info/b7b3513d864b4aecab679a7a0f1147ce 2024-12-07T12:23:30,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/.tmp/info/b7b3513d864b4aecab679a7a0f1147ce as hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/info/b7b3513d864b4aecab679a7a0f1147ce 2024-12-07T12:23:30,354 INFO [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/info/b7b3513d864b4aecab679a7a0f1147ce, entries=1, sequenceid=18, filesize=5.9 K 2024-12-07T12:23:30,355 INFO [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 681d3197c39ab834080385eab4e4fc43 in 22ms, sequenceid=18, compaction requested=false 2024-12-07T12:23:30,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): 
Flush status journal for 681d3197c39ab834080385eab4e4fc43: 2024-12-07T12:23:30,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43. 2024-12-07T12:23:30,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-12-07T12:23:30,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-12-07T12:23:30,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-12-07T12:23:30,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 178 msec 2024-12-07T12:23:30,361 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 184 msec 2024-12-07T12:23:30,624 INFO [master/27c6fcd7dac8:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-07T12:23:30,624 INFO [master/27c6fcd7dac8:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-07T12:23:31,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:31,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:32,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:32,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:33,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:33,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:34,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:34,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:35,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:35,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:35,230 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 681d3197c39ab834080385eab4e4fc43, had cached 0 bytes from a total of 14329 2024-12-07T12:23:36,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:36,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:37,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:37,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:38,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:38,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:39,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:39,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:40,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:40,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:23:40,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41517 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-07T12:23:40,217 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-07T12:23:40,219 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C44009%2C1733574169061.1733574220219 2024-12-07T12:23:40,225 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:40,225 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:40,225 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:40,226 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:40,226 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:40,226 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/WALs/27c6fcd7dac8,44009,1733574169061/27c6fcd7dac8%2C44009%2C1733574169061.1733574210168 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/WALs/27c6fcd7dac8,44009,1733574169061/27c6fcd7dac8%2C44009%2C1733574169061.1733574220219 2024-12-07T12:23:40,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741842_1018 (size=2026) 2024-12-07T12:23:40,228 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44495:44495),(127.0.0.1/127.0.0.1:38969:38969)] 2024-12-07T12:23:40,228 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/WALs/27c6fcd7dac8,44009,1733574169061/27c6fcd7dac8%2C44009%2C1733574169061.1733574210168 is not closed yet, will try archiving it next time 2024-12-07T12:23:40,228 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-07T12:23:40,228 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/WALs/27c6fcd7dac8,44009,1733574169061/27c6fcd7dac8%2C44009%2C1733574169061.1733574200079 to hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/oldWALs/27c6fcd7dac8%2C44009%2C1733574169061.1733574200079 2024-12-07T12:23:40,228 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
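The WARN storm in the preceding entries is HBase's WAL close path (Close-WAL-Writer-0) trying to recover the lease on two old WAL files roughly once per second and failing every time with "Filesystem closed", because the DFSClient behind hdfs://localhost:45085 has already been shut down; the reflective frames (Method.invoke) are just how RecoverLeaseFSUtils reaches DistributedFileSystem.isFileClosed. A minimal sketch of that recover-and-poll loop, written directly against the public DistributedFileSystem API rather than HBase's reflective helper; the class name, method name, and fixed one-second sleep are illustrative, not the actual HBase code:

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
      // Ask the NameNode to recover the lease on a WAL, then poll until the file is closed.
      public static void recover(DistributedFileSystem dfs, Path wal)
          throws IOException, InterruptedException {
        boolean closed = dfs.recoverLease(wal);   // triggers lease recovery on the NameNode
        while (!closed) {
          Thread.sleep(1000L);                    // the log shows roughly one retry per second
          // If the underlying DFSClient was already closed, this throws
          // IOException("Filesystem closed") and the caller logs the WARN seen above.
          closed = dfs.isFileClosed(wal);
        }
      }
    }

Because the filesystem behind those WAL paths is already closed, the poll can never succeed, which is why the same two WAL files keep reappearing in the warnings above.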
2024-12-07T12:23:40,228 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T12:23:40,228 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:23:40,228 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:23:40,228 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
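The call stack recorded above shows where this shutdown originates: the JUnit @After method AbstractTestLogRolling.tearDown calls HBaseTestingUtil.shutdownMiniCluster, which closes the async connection and then brings down the HBase, ZooKeeper, and HDFS mini-cluster. A minimal sketch of that test-lifecycle pairing, assuming the standard HBaseTestingUtil API; the class name and the @Before half are illustrative (only the teardown path appears in the trace):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;

    public class LogRollingTearDownSketch {
      private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        testUtil.startMiniCluster();   // in-process HDFS + ZooKeeper + HBase, as used by this run
      }

      @After
      public void tearDown() throws Exception {
        // Mirrors the logged stack: tearDown -> shutdownMiniCluster -> shutdownMiniHBaseCluster
        testUtil.shutdownMiniCluster();
      }
    }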
2024-12-07T12:23:40,228 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-07T12:23:40,228 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1167285988, stopped=false 2024-12-07T12:23:40,228 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=27c6fcd7dac8,41517,1733574169016 2024-12-07T12:23:40,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741842_1018 (size=2026) 2024-12-07T12:23:40,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44009-0x1018ce07c3a0001, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T12:23:40,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44009-0x1018ce07c3a0001, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:23:40,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T12:23:40,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:23:40,232 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T12:23:40,232 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
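The ZKWatcher entries above record the actual shutdown signal: the master deletes the /hbase/running znode, and both the master and regionserver watchers receive NodeDeleted for that path, after which each process begins stopping. A minimal sketch of that watch pattern using the plain ZooKeeper client rather than HBase's ZKWatcher/ZKUtil wrappers; the quorum address, timeout, and class name are illustrative (this test run happens to use 127.0.0.1:59219):

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });
        Watcher shutdownWatch = (WatchedEvent event) -> {
          if (event.getType() == Watcher.Event.EventType.NodeDeleted
              && "/hbase/running".equals(event.getPath())) {
            // Deletion of /hbase/running is the cluster-wide "stop" signal the log reacts to.
            System.out.println("Cluster shutdown requested");
          }
        };
        // exists() registers the watch even when the znode is absent, matching the
        // "Set watcher on znode that does not yet exist" DEBUG lines that follow.
        zk.exists("/hbase/running", shutdownWatch);
        Thread.sleep(60_000);   // keep the process alive long enough to receive the event
        zk.close();
      }
    }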
2024-12-07T12:23:40,232 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T12:23:40,232 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:23:40,232 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:23:40,233 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '27c6fcd7dac8,44009,1733574169061' ***** 2024-12-07T12:23:40,233 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T12:23:40,233 INFO [RS:0;27c6fcd7dac8:44009 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T12:23:40,233 INFO [RS:0;27c6fcd7dac8:44009 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T12:23:40,233 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T12:23:40,233 INFO [RS:0;27c6fcd7dac8:44009 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T12:23:40,233 INFO [RS:0;27c6fcd7dac8:44009 {}] regionserver.HRegionServer(3091): Received CLOSE for 681d3197c39ab834080385eab4e4fc43 2024-12-07T12:23:40,233 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44009-0x1018ce07c3a0001, quorum=127.0.0.1:59219, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:23:40,233 INFO [RS:0;27c6fcd7dac8:44009 {}] regionserver.HRegionServer(959): stopping server 27c6fcd7dac8,44009,1733574169061 2024-12-07T12:23:40,233 INFO [RS:0;27c6fcd7dac8:44009 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T12:23:40,233 INFO [RS:0;27c6fcd7dac8:44009 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;27c6fcd7dac8:44009. 2024-12-07T12:23:40,234 DEBUG [RS:0;27c6fcd7dac8:44009 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T12:23:40,234 DEBUG [RS:0;27c6fcd7dac8:44009 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:23:40,234 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 681d3197c39ab834080385eab4e4fc43, disabling compactions & flushes 2024-12-07T12:23:40,234 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43. 2024-12-07T12:23:40,234 INFO [RS:0;27c6fcd7dac8:44009 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T12:23:40,234 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43. 2024-12-07T12:23:40,234 INFO [RS:0;27c6fcd7dac8:44009 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T12:23:40,234 INFO [RS:0;27c6fcd7dac8:44009 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T12:23:40,234 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43. after waiting 0 ms 2024-12-07T12:23:40,234 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43. 2024-12-07T12:23:40,234 INFO [RS:0;27c6fcd7dac8:44009 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-07T12:23:40,234 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 681d3197c39ab834080385eab4e4fc43 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-07T12:23:40,234 INFO [RS:0;27c6fcd7dac8:44009 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-07T12:23:40,234 DEBUG [RS:0;27c6fcd7dac8:44009 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 681d3197c39ab834080385eab4e4fc43=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43.} 2024-12-07T12:23:40,234 DEBUG [RS:0;27c6fcd7dac8:44009 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 681d3197c39ab834080385eab4e4fc43 2024-12-07T12:23:40,234 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T12:23:40,234 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T12:23:40,234 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T12:23:40,234 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T12:23:40,234 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T12:23:40,234 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-12-07T12:23:40,239 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 
{event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/.tmp/info/54ed00cb8f774120b8489d9053ca4935 is 1080, key is row0001/info:/1733574220218/Put/seqid=0 2024-12-07T12:23:40,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741845_1021 (size=6033) 2024-12-07T12:23:40,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741845_1021 (size=6033) 2024-12-07T12:23:40,245 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/.tmp/info/54ed00cb8f774120b8489d9053ca4935 2024-12-07T12:23:40,251 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/.tmp/info/54ed00cb8f774120b8489d9053ca4935 as hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/info/54ed00cb8f774120b8489d9053ca4935 2024-12-07T12:23:40,256 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/info/54ed00cb8f774120b8489d9053ca4935, entries=1, sequenceid=22, filesize=5.9 K 2024-12-07T12:23:40,257 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 681d3197c39ab834080385eab4e4fc43 in 23ms, sequenceid=22, compaction requested=true 2024-12-07T12:23:40,257 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/info/bad512cbb11f487187af8f6ff59283ab, hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/info/b3f2bc1e7c3e4f7b9c14da307874467d, hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/info/2454201763e74d31ade7fdc56bebe96d] to archive 2024-12-07T12:23:40,258 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43.-1 {}] 
backup.HFileArchiver(360): Archiving compacted files. 2024-12-07T12:23:40,260 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/hbase/meta/1588230740/.tmp/info/a2ea3ecd85e64fc8bffee00caac82566 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43./info:regioninfo/1733574170243/Put/seqid=0 2024-12-07T12:23:40,260 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/info/bad512cbb11f487187af8f6ff59283ab to hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/info/bad512cbb11f487187af8f6ff59283ab 2024-12-07T12:23:40,262 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/info/b3f2bc1e7c3e4f7b9c14da307874467d to hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/info/b3f2bc1e7c3e4f7b9c14da307874467d 2024-12-07T12:23:40,263 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/info/2454201763e74d31ade7fdc56bebe96d to hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/info/2454201763e74d31ade7fdc56bebe96d 2024-12-07T12:23:40,264 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=27c6fcd7dac8:41517 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-12-07T12:23:40,264 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [bad512cbb11f487187af8f6ff59283ab=6033, b3f2bc1e7c3e4f7b9c14da307874467d=6033, 2454201763e74d31ade7fdc56bebe96d=6033] 2024-12-07T12:23:40,269 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/681d3197c39ab834080385eab4e4fc43/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-12-07T12:23:40,269 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43. 2024-12-07T12:23:40,269 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 681d3197c39ab834080385eab4e4fc43: Waiting for close lock at 1733574220234Running coprocessor pre-close hooks at 1733574220234Disabling compacts and flushes for region at 1733574220234Disabling writes for close at 1733574220234Obtaining lock to block concurrent updates at 1733574220234Preparing flush snapshotting stores in 681d3197c39ab834080385eab4e4fc43 at 1733574220234Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1733574220234Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43. at 1733574220235 (+1 ms)Flushing 681d3197c39ab834080385eab4e4fc43/info: creating writer at 1733574220235Flushing 681d3197c39ab834080385eab4e4fc43/info: appending metadata at 1733574220238 (+3 ms)Flushing 681d3197c39ab834080385eab4e4fc43/info: closing flushed file at 1733574220238Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@62bb480c: reopening flushed file at 1733574220250 (+12 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 681d3197c39ab834080385eab4e4fc43 in 23ms, sequenceid=22, compaction requested=true at 1733574220257 (+7 ms)Writing region close event to WAL at 1733574220265 (+8 ms)Running coprocessor post-close hooks at 1733574220269 (+4 ms)Closed at 1733574220269 2024-12-07T12:23:40,270 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733574169888.681d3197c39ab834080385eab4e4fc43. 
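The region close, flush, and archival activity traced above is driven by the test's teardown: the call stack logged earlier in this section shows AbstractTestLogRolling.tearDown() going through HBaseTestingUtil.shutdownMiniCluster(), which in turn stops the region server and master whose shutdown is recorded here. A minimal sketch of that teardown shape, assuming a JUnit 4 test that owns its own HBaseTestingUtil instance (the field and setup half are illustrative assumptions, not the actual AbstractTestLogRolling code):

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class ExampleMiniClusterTest {

  // Illustrative field name; the real test wires its utility up in AbstractTestLogRolling.
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Assumed setup half: brings up MiniZK, MiniDFS, one master and one region server.
    testUtil.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // This is the call visible in the stack traces above: regions close and flush,
    // WALs move to oldWALs, then the master, datanodes and the MiniZK cluster stop.
    testUtil.shutdownMiniCluster();
  }
}
```

The StoppedRpcClientException logged just above is a side effect of this ordering: the server's async cluster connection is already closed when the store tries to report archived files to the master, so the report is skipped and retried, which is why it is logged at DEBUG/WARN rather than failing the test.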
2024-12-07T12:23:40,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741846_1022 (size=7308) 2024-12-07T12:23:40,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741846_1022 (size=7308) 2024-12-07T12:23:40,307 INFO [regionserver/27c6fcd7dac8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-07T12:23:40,307 INFO [regionserver/27c6fcd7dac8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-07T12:23:40,434 DEBUG [RS:0;27c6fcd7dac8:44009 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-07T12:23:40,635 DEBUG [RS:0;27c6fcd7dac8:44009 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-07T12:23:40,668 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/hbase/meta/1588230740/.tmp/info/a2ea3ecd85e64fc8bffee00caac82566 2024-12-07T12:23:40,689 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/hbase/meta/1588230740/.tmp/ns/9f09b9de46f44fe8b0d4bfbc5ba3d697 is 43, key is default/ns:d/1733574169851/Put/seqid=0 2024-12-07T12:23:40,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741847_1023 (size=5153) 2024-12-07T12:23:40,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741847_1023 (size=5153) 2024-12-07T12:23:40,694 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/hbase/meta/1588230740/.tmp/ns/9f09b9de46f44fe8b0d4bfbc5ba3d697 2024-12-07T12:23:40,714 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/hbase/meta/1588230740/.tmp/table/6405cf5b1122461a96bf6fb871267d1f is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1733574170253/Put/seqid=0 2024-12-07T12:23:40,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741848_1024 (size=5508) 2024-12-07T12:23:40,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741848_1024 (size=5508) 2024-12-07T12:23:40,719 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/hbase/meta/1588230740/.tmp/table/6405cf5b1122461a96bf6fb871267d1f 2024-12-07T12:23:40,725 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/hbase/meta/1588230740/.tmp/info/a2ea3ecd85e64fc8bffee00caac82566 as hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/hbase/meta/1588230740/info/a2ea3ecd85e64fc8bffee00caac82566 2024-12-07T12:23:40,730 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/hbase/meta/1588230740/info/a2ea3ecd85e64fc8bffee00caac82566, entries=10, sequenceid=11, filesize=7.1 K 2024-12-07T12:23:40,731 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/hbase/meta/1588230740/.tmp/ns/9f09b9de46f44fe8b0d4bfbc5ba3d697 as hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/hbase/meta/1588230740/ns/9f09b9de46f44fe8b0d4bfbc5ba3d697 2024-12-07T12:23:40,736 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/hbase/meta/1588230740/ns/9f09b9de46f44fe8b0d4bfbc5ba3d697, entries=2, sequenceid=11, filesize=5.0 K 2024-12-07T12:23:40,737 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/hbase/meta/1588230740/.tmp/table/6405cf5b1122461a96bf6fb871267d1f as hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/hbase/meta/1588230740/table/6405cf5b1122461a96bf6fb871267d1f 2024-12-07T12:23:40,744 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/hbase/meta/1588230740/table/6405cf5b1122461a96bf6fb871267d1f, entries=2, sequenceid=11, filesize=5.4 K 2024-12-07T12:23:40,745 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 511ms, sequenceid=11, compaction requested=false 2024-12-07T12:23:40,750 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-07T12:23:40,750 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T12:23:40,750 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T12:23:40,750 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733574220234Running coprocessor pre-close hooks at 1733574220234Disabling compacts and flushes 
for region at 1733574220234Disabling writes for close at 1733574220234Obtaining lock to block concurrent updates at 1733574220234Preparing flush snapshotting stores in 1588230740 at 1733574220234Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1733574220235 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733574220235Flushing 1588230740/info: creating writer at 1733574220235Flushing 1588230740/info: appending metadata at 1733574220259 (+24 ms)Flushing 1588230740/info: closing flushed file at 1733574220259Flushing 1588230740/ns: creating writer at 1733574220674 (+415 ms)Flushing 1588230740/ns: appending metadata at 1733574220688 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733574220688Flushing 1588230740/table: creating writer at 1733574220698 (+10 ms)Flushing 1588230740/table: appending metadata at 1733574220713 (+15 ms)Flushing 1588230740/table: closing flushed file at 1733574220713Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a938635: reopening flushed file at 1733574220724 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@8780703: reopening flushed file at 1733574220730 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4029205: reopening flushed file at 1733574220737 (+7 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 511ms, sequenceid=11, compaction requested=false at 1733574220745 (+8 ms)Writing region close event to WAL at 1733574220746 (+1 ms)Running coprocessor post-close hooks at 1733574220750 (+4 ms)Closed at 1733574220750 2024-12-07T12:23:40,750 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T12:23:40,835 INFO [RS:0;27c6fcd7dac8:44009 {}] regionserver.HRegionServer(976): stopping server 27c6fcd7dac8,44009,1733574169061; all regions closed. 
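Both close journals above end with a "Finished flush of dataSize ... in Nms, sequenceid=..." summary. When sifting through a run like this one, it can help to pull those numbers out mechanically; a small self-contained sketch (the regular expression and the sample line handling are mine, not anything HBase ships):

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class FlushSummaryParser {
  // Matches the flush summary fragment emitted on region close, e.g.
  // "Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0
  //  for 681d3197c39ab834080385eab4e4fc43 in 23ms, sequenceid=22, compaction requested=true"
  private static final Pattern FLUSH = Pattern.compile(
      "Finished flush of dataSize ~[^/]+/(\\d+), heapSize ~[^/]+/(\\d+).*"
          + " for (\\w+) in (\\d+)ms, sequenceid=(\\d+)");

  public static void main(String[] args) {
    String line = "Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, "
        + "currentSize=0 B/0 for 681d3197c39ab834080385eab4e4fc43 in 23ms, "
        + "sequenceid=22, compaction requested=true";
    Matcher m = FLUSH.matcher(line);
    if (m.find()) {
      System.out.printf("region=%s bytes=%s heap=%s millis=%s seqid=%s%n",
          m.group(3), m.group(1), m.group(2), m.group(4), m.group(5));
    }
  }
}
```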
2024-12-07T12:23:40,835 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:40,835 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:40,836 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:40,836 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:40,836 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:40,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741834_1010 (size=3306) 2024-12-07T12:23:40,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741834_1010 (size=3306) 2024-12-07T12:23:40,840 DEBUG [RS:0;27c6fcd7dac8:44009 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/oldWALs 2024-12-07T12:23:40,840 INFO [RS:0;27c6fcd7dac8:44009 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 27c6fcd7dac8%2C44009%2C1733574169061.meta:.meta(num 1733574169808) 2024-12-07T12:23:40,841 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:40,841 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:40,841 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:40,841 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:40,841 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:40,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741844_1020 (size=1252) 2024-12-07T12:23:40,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741844_1020 (size=1252) 2024-12-07T12:23:40,846 DEBUG [RS:0;27c6fcd7dac8:44009 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/oldWALs 2024-12-07T12:23:40,846 INFO [RS:0;27c6fcd7dac8:44009 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 27c6fcd7dac8%2C44009%2C1733574169061:(num 1733574220219) 2024-12-07T12:23:40,846 DEBUG [RS:0;27c6fcd7dac8:44009 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:23:40,846 INFO [RS:0;27c6fcd7dac8:44009 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T12:23:40,846 INFO [RS:0;27c6fcd7dac8:44009 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T12:23:40,846 INFO [RS:0;27c6fcd7dac8:44009 {}] hbase.ChoreService(370): Chore service for: regionserver/27c6fcd7dac8:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-07T12:23:40,847 INFO [RS:0;27c6fcd7dac8:44009 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T12:23:40,847 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
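The entries above show the closed WAL files being moved into the shared oldWALs directory under the test data root. To confirm what ended up there, the standard Hadoop FileSystem API is enough; a rough sketch, hard-coding the HDFS URI and test-data path from this particular run (both are ephemeral, per-run values that a real check would derive from the running cluster):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListOldWals {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Per-run values copied from this log.
    conf.set("fs.defaultFS", "hdfs://localhost:45559");
    Path oldWals = new Path(
        "/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/oldWALs");
    try (FileSystem fs = FileSystem.get(conf)) {
      for (FileStatus status : fs.listStatus(oldWals)) {
        System.out.println(status.getPath().getName() + " " + status.getLen() + " bytes");
      }
    }
  }
}
```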
2024-12-07T12:23:40,847 INFO [RS:0;27c6fcd7dac8:44009 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44009 2024-12-07T12:23:40,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44009-0x1018ce07c3a0001, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/27c6fcd7dac8,44009,1733574169061 2024-12-07T12:23:40,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T12:23:40,849 INFO [RS:0;27c6fcd7dac8:44009 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T12:23:40,850 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [27c6fcd7dac8,44009,1733574169061] 2024-12-07T12:23:40,853 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/27c6fcd7dac8,44009,1733574169061 already deleted, retry=false 2024-12-07T12:23:40,853 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 27c6fcd7dac8,44009,1733574169061 expired; onlineServers=0 2024-12-07T12:23:40,853 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '27c6fcd7dac8,41517,1733574169016' ***** 2024-12-07T12:23:40,853 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-07T12:23:40,853 INFO [M:0;27c6fcd7dac8:41517 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T12:23:40,853 INFO [M:0;27c6fcd7dac8:41517 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T12:23:40,853 DEBUG [M:0;27c6fcd7dac8:41517 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-07T12:23:40,854 DEBUG [M:0;27c6fcd7dac8:41517 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-07T12:23:40,854 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
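The RegionServerTracker entries above react to the region server's ephemeral znode under /hbase/rs disappearing once it closes its ZooKeeper session; the master treats that deletion as server expiration and, with cluster shutdown already set, begins stopping itself. The underlying pattern is plain ZooKeeper: set a watch on the node and handle the NodeDeleted event. A stand-alone sketch of that pattern, reusing this run's quorum address and znode path (this is not HBase's actual tracker code):

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsNodeWatch {
  public static void main(String[] args) throws Exception {
    CountDownLatch deleted = new CountDownLatch(1);
    String path = "/hbase/rs/27c6fcd7dac8,44009,1733574169061"; // znode named in this log
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59219", 30000, (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && path.equals(event.getPath())) {
        // The server's ephemeral node is gone: treat the server as expired.
        deleted.countDown();
      }
    });
    // exists(..., true) registers the default watcher for changes to this znode.
    zk.exists(path, true);
    deleted.await();
    zk.close();
  }
}
```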
2024-12-07T12:23:40,854 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.small.0-1733574169208 {}] cleaner.HFileCleaner(306): Exit Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.small.0-1733574169208,5,FailOnTimeoutGroup] 2024-12-07T12:23:40,854 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.large.0-1733574169208 {}] cleaner.HFileCleaner(306): Exit Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.large.0-1733574169208,5,FailOnTimeoutGroup] 2024-12-07T12:23:40,854 INFO [M:0;27c6fcd7dac8:41517 {}] hbase.ChoreService(370): Chore service for: master/27c6fcd7dac8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-07T12:23:40,854 INFO [M:0;27c6fcd7dac8:41517 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T12:23:40,854 DEBUG [M:0;27c6fcd7dac8:41517 {}] master.HMaster(1795): Stopping service threads 2024-12-07T12:23:40,854 INFO [M:0;27c6fcd7dac8:41517 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-07T12:23:40,854 INFO [M:0;27c6fcd7dac8:41517 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T12:23:40,854 INFO [M:0;27c6fcd7dac8:41517 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-07T12:23:40,855 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-07T12:23:40,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T12:23:40,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:23:40,855 DEBUG [M:0;27c6fcd7dac8:41517 {}] zookeeper.ZKUtil(347): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T12:23:40,855 WARN [M:0;27c6fcd7dac8:41517 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T12:23:40,856 INFO [M:0;27c6fcd7dac8:41517 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/.lastflushedseqids 2024-12-07T12:23:40,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741849_1025 (size=130) 2024-12-07T12:23:40,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741849_1025 (size=130) 2024-12-07T12:23:40,862 INFO [M:0;27c6fcd7dac8:41517 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-07T12:23:40,862 INFO [M:0;27c6fcd7dac8:41517 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-07T12:23:40,863 DEBUG [M:0;27c6fcd7dac8:41517 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T12:23:40,863 INFO [M:0;27c6fcd7dac8:41517 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:23:40,863 DEBUG [M:0;27c6fcd7dac8:41517 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:23:40,863 DEBUG [M:0;27c6fcd7dac8:41517 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T12:23:40,863 DEBUG [M:0;27c6fcd7dac8:41517 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:23:40,863 INFO [M:0;27c6fcd7dac8:41517 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.59 KB heapSize=55 KB 2024-12-07T12:23:40,880 DEBUG [M:0;27c6fcd7dac8:41517 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/98f945459b1d47b0b4dfddc8a795c180 is 82, key is hbase:meta,,1/info:regioninfo/1733574169836/Put/seqid=0 2024-12-07T12:23:40,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741850_1026 (size=5672) 2024-12-07T12:23:40,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741850_1026 (size=5672) 2024-12-07T12:23:40,885 INFO [M:0;27c6fcd7dac8:41517 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/98f945459b1d47b0b4dfddc8a795c180 2024-12-07T12:23:40,906 DEBUG [M:0;27c6fcd7dac8:41517 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/99808431e14f4adf952969953053a9d4 is 799, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733574170259/Put/seqid=0 2024-12-07T12:23:40,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741851_1027 (size=7823) 2024-12-07T12:23:40,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741851_1027 (size=7823) 2024-12-07T12:23:40,911 INFO [M:0;27c6fcd7dac8:41517 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.99 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/99808431e14f4adf952969953053a9d4 2024-12-07T12:23:40,917 INFO [M:0;27c6fcd7dac8:41517 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 99808431e14f4adf952969953053a9d4 2024-12-07T12:23:40,938 DEBUG [M:0;27c6fcd7dac8:41517 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/14d250f3de604da3a34eb5bbde67d33e is 69, key is 27c6fcd7dac8,44009,1733574169061/rs:state/1733574169295/Put/seqid=0 2024-12-07T12:23:40,944 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741852_1028 (size=5156) 2024-12-07T12:23:40,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741852_1028 (size=5156) 2024-12-07T12:23:40,944 INFO [M:0;27c6fcd7dac8:41517 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/14d250f3de604da3a34eb5bbde67d33e 2024-12-07T12:23:40,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44009-0x1018ce07c3a0001, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:23:40,951 INFO [RS:0;27c6fcd7dac8:44009 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T12:23:40,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44009-0x1018ce07c3a0001, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:23:40,951 INFO [RS:0;27c6fcd7dac8:44009 {}] regionserver.HRegionServer(1031): Exiting; stopping=27c6fcd7dac8,44009,1733574169061; zookeeper connection closed. 2024-12-07T12:23:40,951 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5d329cf7 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5d329cf7 2024-12-07T12:23:40,951 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-07T12:23:40,966 DEBUG [M:0;27c6fcd7dac8:41517 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e4cab5af8fcc4c049bdd318c07e53538 is 52, key is load_balancer_on/state:d/1733574169885/Put/seqid=0 2024-12-07T12:23:40,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741853_1029 (size=5056) 2024-12-07T12:23:40,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741853_1029 (size=5056) 2024-12-07T12:23:40,972 INFO [M:0;27c6fcd7dac8:41517 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e4cab5af8fcc4c049bdd318c07e53538 2024-12-07T12:23:40,978 DEBUG [M:0;27c6fcd7dac8:41517 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/98f945459b1d47b0b4dfddc8a795c180 as hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/98f945459b1d47b0b4dfddc8a795c180 2024-12-07T12:23:40,983 INFO [M:0;27c6fcd7dac8:41517 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/98f945459b1d47b0b4dfddc8a795c180, entries=8, sequenceid=121, filesize=5.5 K 2024-12-07T12:23:40,983 DEBUG [M:0;27c6fcd7dac8:41517 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/99808431e14f4adf952969953053a9d4 as hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/99808431e14f4adf952969953053a9d4 2024-12-07T12:23:40,988 INFO [M:0;27c6fcd7dac8:41517 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 99808431e14f4adf952969953053a9d4 2024-12-07T12:23:40,988 INFO [M:0;27c6fcd7dac8:41517 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/99808431e14f4adf952969953053a9d4, entries=14, sequenceid=121, filesize=7.6 K 2024-12-07T12:23:40,989 DEBUG [M:0;27c6fcd7dac8:41517 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/14d250f3de604da3a34eb5bbde67d33e as hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/14d250f3de604da3a34eb5bbde67d33e 2024-12-07T12:23:40,994 INFO [M:0;27c6fcd7dac8:41517 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/14d250f3de604da3a34eb5bbde67d33e, entries=1, sequenceid=121, filesize=5.0 K 2024-12-07T12:23:40,995 DEBUG [M:0;27c6fcd7dac8:41517 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e4cab5af8fcc4c049bdd318c07e53538 as hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e4cab5af8fcc4c049bdd318c07e53538 2024-12-07T12:23:41,000 INFO [M:0;27c6fcd7dac8:41517 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45559/user/jenkins/test-data/9f8d7de7-686b-f5f7-8da2-27df902185dd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e4cab5af8fcc4c049bdd318c07e53538, entries=1, sequenceid=121, filesize=4.9 K 2024-12-07T12:23:41,001 INFO [M:0;27c6fcd7dac8:41517 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.59 KB/44641, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 138ms, sequenceid=121, compaction requested=false 2024-12-07T12:23:41,002 INFO [M:0;27c6fcd7dac8:41517 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-07T12:23:41,002 DEBUG [M:0;27c6fcd7dac8:41517 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733574220863Disabling compacts and flushes for region at 1733574220863Disabling writes for close at 1733574220863Obtaining lock to block concurrent updates at 1733574220863Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733574220863Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44641, getHeapSize=56256, getOffHeapSize=0, getCellsCount=140 at 1733574220863Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733574220864 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733574220864Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733574220879 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733574220880 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733574220890 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733574220905 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733574220905Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733574220917 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733574220938 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733574220938Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733574220949 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733574220965 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733574220965Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7352a270: reopening flushed file at 1733574220977 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7f1b88e6: reopening flushed file at 1733574220983 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f38bb3c: reopening flushed file at 1733574220988 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15ee38bc: reopening flushed file at 1733574220994 (+6 ms)Finished flush of dataSize ~43.59 KB/44641, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 138ms, sequenceid=121, compaction requested=false at 1733574221001 (+7 ms)Writing region close event to WAL at 1733574221002 (+1 ms)Closed at 1733574221002 2024-12-07T12:23:41,003 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:41,003 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:41,003 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:41,003 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:41,003 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:23:41,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35739 is added to blk_1073741830_1006 (size=53038) 2024-12-07T12:23:41,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46431 is added to blk_1073741830_1006 (size=53038) 2024-12-07T12:23:41,006 INFO [M:0;27c6fcd7dac8:41517 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
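The close journals in this section timestamp every step with raw epoch milliseconds alongside "(+N ms)" deltas. Converting those values to readable instants, or to step-to-step gaps, is a one-liner with java.time; a tiny sketch using two values copied from the master:store close journal above:

```java
import java.time.Duration;
import java.time.Instant;

public class CloseJournalTimes {
  public static void main(String[] args) {
    // Values taken from the master:store close journal above.
    Instant closeLock = Instant.ofEpochMilli(1733574220863L); // "Waiting for close lock at ..."
    Instant closed    = Instant.ofEpochMilli(1733574221002L); // "Closed at ..."
    System.out.println("close started:  " + closeLock);
    System.out.println("close finished: " + closed);
    System.out.println("elapsed: " + Duration.between(closeLock, closed).toMillis() + " ms");
  }
}
```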
2024-12-07T12:23:41,006 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T12:23:41,006 INFO [M:0;27c6fcd7dac8:41517 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41517 2024-12-07T12:23:41,006 INFO [M:0;27c6fcd7dac8:41517 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T12:23:41,108 INFO [M:0;27c6fcd7dac8:41517 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T12:23:41,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:23:41,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41517-0x1018ce07c3a0000, quorum=127.0.0.1:59219, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:23:41,111 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1a3c6b7a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:23:41,111 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1beefc80{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:23:41,111 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:23:41,111 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@555a4a92{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:23:41,111 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@311facd9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/hadoop.log.dir/,STOPPED} 2024-12-07T12:23:41,113 WARN [BP-37627011-172.17.0.2-1733574168256 heartbeating to localhost/127.0.0.1:45559 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T12:23:41,113 WARN [BP-37627011-172.17.0.2-1733574168256 heartbeating to localhost/127.0.0.1:45559 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-37627011-172.17.0.2-1733574168256 (Datanode Uuid 832863cd-9d1c-4c28-a7dd-cca5f36ef278) service to localhost/127.0.0.1:45559 2024-12-07T12:23:41,113 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T12:23:41,113 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T12:23:41,114 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/cluster_6c4e5697-e95b-9fc2-b7d7-1dd1d6005eaf/data/data3/current/BP-37627011-172.17.0.2-1733574168256 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:23:41,114 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/cluster_6c4e5697-e95b-9fc2-b7d7-1dd1d6005eaf/data/data4/current/BP-37627011-172.17.0.2-1733574168256 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:23:41,114 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T12:23:41,116 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5aa33ca4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:23:41,116 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3be31a0b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:23:41,116 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:23:41,117 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@9b25e94{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:23:41,117 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@507832d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/hadoop.log.dir/,STOPPED} 2024-12-07T12:23:41,118 WARN [BP-37627011-172.17.0.2-1733574168256 heartbeating to localhost/127.0.0.1:45559 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T12:23:41,118 WARN [BP-37627011-172.17.0.2-1733574168256 heartbeating to localhost/127.0.0.1:45559 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-37627011-172.17.0.2-1733574168256 (Datanode Uuid 1e3cbf44-289e-460b-aa8a-7a07257f4aab) service to localhost/127.0.0.1:45559 2024-12-07T12:23:41,118 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T12:23:41,118 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T12:23:41,119 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/cluster_6c4e5697-e95b-9fc2-b7d7-1dd1d6005eaf/data/data1/current/BP-37627011-172.17.0.2-1733574168256 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:23:41,119 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/cluster_6c4e5697-e95b-9fc2-b7d7-1dd1d6005eaf/data/data2/current/BP-37627011-172.17.0.2-1733574168256 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:23:41,119 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T12:23:41,125 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@188d3e33{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T12:23:41,126 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2aaa4790{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:23:41,126 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:23:41,126 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1af676f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:23:41,126 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fb33a9d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/hadoop.log.dir/,STOPPED} 2024-12-07T12:23:41,132 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-07T12:23:41,150 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-07T12:23:41,158 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=207 (was 180) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45559 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45559 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45559 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45559 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45559 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45559 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:45559 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/27c6fcd7dac8:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45559 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45559 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=32 (was 75), ProcessCount=11 (was 11), AvailableMemoryMB=6140 (was 6213) 2024-12-07T12:23:41,166 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=207, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=32, ProcessCount=11, AvailableMemoryMB=6140 2024-12-07T12:23:41,166 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-07T12:23:41,166 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/hadoop.log.dir so I do NOT create it in target/test-data/af0c1146-9f3d-599c-6872-655041c5565a 2024-12-07T12:23:41,166 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/43fc92c0-70d2-8108-d21d-32924d7ecb31/hadoop.tmp.dir so I do NOT create it in target/test-data/af0c1146-9f3d-599c-6872-655041c5565a 2024-12-07T12:23:41,166 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/cluster_6dda650a-2883-97a9-0b81-69b13d0af14b, deleteOnExit=true 2024-12-07T12:23:41,166 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-07T12:23:41,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/test.cache.data in system properties and HBase conf 2024-12-07T12:23:41,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T12:23:41,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/hadoop.log.dir in system properties and HBase conf 2024-12-07T12:23:41,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T12:23:41,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T12:23:41,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-07T12:23:41,167 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-12-07T12:23:41,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T12:23:41,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T12:23:41,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T12:23:41,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T12:23:41,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T12:23:41,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T12:23:41,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T12:23:41,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T12:23:41,168 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T12:23:41,168 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/nfs.dump.dir in system properties and HBase conf 2024-12-07T12:23:41,168 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/java.io.tmpdir in system properties and HBase conf
2024-12-07T12:23:41,168 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-07T12:23:41,168 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-07T12:23:41,168 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-07T12:23:41,181 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-07T12:23:41,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-07T12:23:41,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-07T12:23:41,245 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-07T12:23:41,249 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-07T12:23:41,250 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-07T12:23:41,250 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-07T12:23:41,250 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-07T12:23:41,251 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:23:41,251 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3def21d3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:23:41,252 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e1b4695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:23:41,309 INFO [regionserver/27c6fcd7dac8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T12:23:41,366 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76385d85{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/java.io.tmpdir/jetty-localhost-44797-hadoop-hdfs-3_4_1-tests_jar-_-any-4591661674176077262/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T12:23:41,366 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1776bdc2{HTTP/1.1, (http/1.1)}{localhost:44797} 2024-12-07T12:23:41,367 INFO [Time-limited test {}] server.Server(415): Started @238784ms 2024-12-07T12:23:41,379 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-07T12:23:41,431 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:23:41,434 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:23:41,453 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:23:41,453 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:23:41,453 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T12:23:41,453 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4bb23947{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:23:41,454 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60b9b83d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:23:41,566 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@631c133{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/java.io.tmpdir/jetty-localhost-45223-hadoop-hdfs-3_4_1-tests_jar-_-any-8977195344039061524/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:23:41,567 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3523e770{HTTP/1.1, (http/1.1)}{localhost:45223} 2024-12-07T12:23:41,567 INFO [Time-limited test {}] server.Server(415): Started @238985ms 2024-12-07T12:23:41,568 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T12:23:41,597 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:23:41,600 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:23:41,601 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:23:41,601 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:23:41,601 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T12:23:41,601 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ab86f9f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:23:41,601 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2fc2e7d1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:23:41,675 WARN [Thread-1976 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/cluster_6dda650a-2883-97a9-0b81-69b13d0af14b/data/data2/current/BP-1023889294-172.17.0.2-1733574221188/current, will proceed with Du for space computation calculation, 2024-12-07T12:23:41,675 WARN [Thread-1975 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/cluster_6dda650a-2883-97a9-0b81-69b13d0af14b/data/data1/current/BP-1023889294-172.17.0.2-1733574221188/current, will proceed with Du for space computation calculation, 2024-12-07T12:23:41,697 WARN [Thread-1954 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T12:23:41,700 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8fe0827d56dfdb3a with lease ID 0xb52a475dfa0a690b: Processing first storage report for DS-a27ba884-3779-421b-929b-4d601b9b15a1 from datanode DatanodeRegistration(127.0.0.1:36151, datanodeUuid=ab8ed10e-5026-4b52-9a3c-4b2a8f070b36, infoPort=40849, infoSecurePort=0, ipcPort=36679, storageInfo=lv=-57;cid=testClusterID;nsid=1708256143;c=1733574221188) 2024-12-07T12:23:41,700 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8fe0827d56dfdb3a with lease ID 0xb52a475dfa0a690b: from storage DS-a27ba884-3779-421b-929b-4d601b9b15a1 node DatanodeRegistration(127.0.0.1:36151, datanodeUuid=ab8ed10e-5026-4b52-9a3c-4b2a8f070b36, infoPort=40849, infoSecurePort=0, ipcPort=36679, storageInfo=lv=-57;cid=testClusterID;nsid=1708256143;c=1733574221188), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:23:41,700 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8fe0827d56dfdb3a with lease ID 0xb52a475dfa0a690b: Processing first storage report for DS-4f1332d1-1958-45e4-8be4-2f839b2c3250 from datanode DatanodeRegistration(127.0.0.1:36151, datanodeUuid=ab8ed10e-5026-4b52-9a3c-4b2a8f070b36, infoPort=40849, infoSecurePort=0, ipcPort=36679, storageInfo=lv=-57;cid=testClusterID;nsid=1708256143;c=1733574221188) 2024-12-07T12:23:41,700 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8fe0827d56dfdb3a with lease ID 0xb52a475dfa0a690b: from storage DS-4f1332d1-1958-45e4-8be4-2f839b2c3250 node DatanodeRegistration(127.0.0.1:36151, datanodeUuid=ab8ed10e-5026-4b52-9a3c-4b2a8f070b36, infoPort=40849, infoSecurePort=0, ipcPort=36679, storageInfo=lv=-57;cid=testClusterID;nsid=1708256143;c=1733574221188), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:23:41,718 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@dcfcbff{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/java.io.tmpdir/jetty-localhost-41365-hadoop-hdfs-3_4_1-tests_jar-_-any-2133544336659937756/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:23:41,718 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3dc4994c{HTTP/1.1, (http/1.1)}{localhost:41365} 2024-12-07T12:23:41,718 INFO [Time-limited test {}] server.Server(415): Started @239136ms 2024-12-07T12:23:41,719 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-07T12:23:41,812 WARN [Thread-2001 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/cluster_6dda650a-2883-97a9-0b81-69b13d0af14b/data/data3/current/BP-1023889294-172.17.0.2-1733574221188/current, will proceed with Du for space computation calculation, 2024-12-07T12:23:41,812 WARN [Thread-2002 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/cluster_6dda650a-2883-97a9-0b81-69b13d0af14b/data/data4/current/BP-1023889294-172.17.0.2-1733574221188/current, will proceed with Du for space computation calculation, 2024-12-07T12:23:41,830 WARN [Thread-1990 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T12:23:41,831 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfd12378eed3e3629 with lease ID 0xb52a475dfa0a690c: Processing first storage report for DS-39f24566-da75-48a4-b474-19c0255ed770 from datanode DatanodeRegistration(127.0.0.1:41323, datanodeUuid=f387faad-8452-405f-ae97-7f76eee1d882, infoPort=37775, infoSecurePort=0, ipcPort=43909, storageInfo=lv=-57;cid=testClusterID;nsid=1708256143;c=1733574221188) 2024-12-07T12:23:41,832 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfd12378eed3e3629 with lease ID 0xb52a475dfa0a690c: from storage DS-39f24566-da75-48a4-b474-19c0255ed770 node DatanodeRegistration(127.0.0.1:41323, datanodeUuid=f387faad-8452-405f-ae97-7f76eee1d882, infoPort=37775, infoSecurePort=0, ipcPort=43909, storageInfo=lv=-57;cid=testClusterID;nsid=1708256143;c=1733574221188), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:23:41,832 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfd12378eed3e3629 with lease ID 0xb52a475dfa0a690c: Processing first storage report for DS-4adf9b6d-edfc-4ee1-aa81-207a6cb56ad8 from datanode DatanodeRegistration(127.0.0.1:41323, datanodeUuid=f387faad-8452-405f-ae97-7f76eee1d882, infoPort=37775, infoSecurePort=0, ipcPort=43909, storageInfo=lv=-57;cid=testClusterID;nsid=1708256143;c=1733574221188) 2024-12-07T12:23:41,832 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfd12378eed3e3629 with lease ID 0xb52a475dfa0a690c: from storage DS-4adf9b6d-edfc-4ee1-aa81-207a6cb56ad8 node DatanodeRegistration(127.0.0.1:41323, datanodeUuid=f387faad-8452-405f-ae97-7f76eee1d882, infoPort=37775, infoSecurePort=0, ipcPort=43909, storageInfo=lv=-57;cid=testClusterID;nsid=1708256143;c=1733574221188), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:23:41,841 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a 2024-12-07T12:23:41,843 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/cluster_6dda650a-2883-97a9-0b81-69b13d0af14b/zookeeper_0, clientPort=55472, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/cluster_6dda650a-2883-97a9-0b81-69b13d0af14b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/cluster_6dda650a-2883-97a9-0b81-69b13d0af14b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-07T12:23:41,844 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55472 2024-12-07T12:23:41,844 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:23:41,846 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:23:41,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741825_1001 (size=7) 2024-12-07T12:23:41,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741825_1001 (size=7) 2024-12-07T12:23:41,856 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52 with version=8 2024-12-07T12:23:41,856 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/hbase-staging 2024-12-07T12:23:41,858 INFO [Time-limited test {}] client.ConnectionUtils(128): master/27c6fcd7dac8:0 server-side Connection retries=45 2024-12-07T12:23:41,858 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:23:41,858 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T12:23:41,858 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T12:23:41,858 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:23:41,859 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T12:23:41,859 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-07T12:23:41,859 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T12:23:41,859 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44847 2024-12-07T12:23:41,861 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44847 connecting to ZooKeeper ensemble=127.0.0.1:55472 2024-12-07T12:23:41,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:448470x0, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T12:23:41,872 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44847-0x1018ce14aa20000 connected 2024-12-07T12:23:41,884 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:23:41,885 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:23:41,887 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:23:41,887 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52, hbase.cluster.distributed=false 2024-12-07T12:23:41,888 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T12:23:41,890 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44847 2024-12-07T12:23:41,892 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44847 2024-12-07T12:23:41,893 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44847 2024-12-07T12:23:41,894 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44847 2024-12-07T12:23:41,894 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44847 2024-12-07T12:23:41,909 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/27c6fcd7dac8:0 server-side Connection retries=45 2024-12-07T12:23:41,909 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:23:41,909 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T12:23:41,909 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T12:23:41,909 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:23:41,909 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T12:23:41,909 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T12:23:41,909 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T12:23:41,910 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37961 2024-12-07T12:23:41,911 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37961 connecting to ZooKeeper ensemble=127.0.0.1:55472 2024-12-07T12:23:41,912 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:23:41,913 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:23:41,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:379610x0, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T12:23:41,919 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:379610x0, quorum=127.0.0.1:55472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:23:41,919 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37961-0x1018ce14aa20001 connected 2024-12-07T12:23:41,919 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T12:23:41,920 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T12:23:41,920 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37961-0x1018ce14aa20001, quorum=127.0.0.1:55472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T12:23:41,921 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37961-0x1018ce14aa20001, quorum=127.0.0.1:55472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T12:23:41,922 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37961 2024-12-07T12:23:41,922 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37961 2024-12-07T12:23:41,922 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37961 2024-12-07T12:23:41,922 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37961 2024-12-07T12:23:41,923 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37961 2024-12-07T12:23:41,934 
DEBUG [M:0;27c6fcd7dac8:44847 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;27c6fcd7dac8:44847 2024-12-07T12:23:41,935 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/27c6fcd7dac8,44847,1733574221858 2024-12-07T12:23:41,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:23:41,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1018ce14aa20001, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:23:41,937 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/27c6fcd7dac8,44847,1733574221858 2024-12-07T12:23:41,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1018ce14aa20001, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T12:23:41,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:23:41,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1018ce14aa20001, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:23:41,938 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T12:23:41,939 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/27c6fcd7dac8,44847,1733574221858 from backup master directory 2024-12-07T12:23:41,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/27c6fcd7dac8,44847,1733574221858 2024-12-07T12:23:41,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:23:41,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1018ce14aa20001, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:23:41,940 WARN [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-07T12:23:41,940 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=27c6fcd7dac8,44847,1733574221858 2024-12-07T12:23:41,944 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/hbase.id] with ID: 9d01219d-d558-44c9-9fff-e8cb71050135 2024-12-07T12:23:41,944 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/.tmp/hbase.id 2024-12-07T12:23:41,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741826_1002 (size=42) 2024-12-07T12:23:41,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741826_1002 (size=42) 2024-12-07T12:23:41,950 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/.tmp/hbase.id]:[hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/hbase.id] 2024-12-07T12:23:41,960 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:23:41,961 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-07T12:23:41,962 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-07T12:23:41,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:23:41,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1018ce14aa20001, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:23:41,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741827_1003 (size=196) 2024-12-07T12:23:41,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741827_1003 (size=196) 2024-12-07T12:23:41,975 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T12:23:41,976 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T12:23:41,976 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T12:23:41,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741828_1004 (size=1189) 2024-12-07T12:23:41,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741828_1004 (size=1189) 2024-12-07T12:23:41,983 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/data/master/store 2024-12-07T12:23:41,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741829_1005 (size=34) 2024-12-07T12:23:41,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741829_1005 (size=34) 2024-12-07T12:23:41,989 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:23:41,989 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T12:23:41,989 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:23:41,989 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:23:41,989 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T12:23:41,989 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:23:41,989 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-07T12:23:41,989 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733574221989Disabling compacts and flushes for region at 1733574221989Disabling writes for close at 1733574221989Writing region close event to WAL at 1733574221989Closed at 1733574221989 2024-12-07T12:23:41,989 WARN [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/data/master/store/.initializing 2024-12-07T12:23:41,990 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/WALs/27c6fcd7dac8,44847,1733574221858 2024-12-07T12:23:41,992 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=27c6fcd7dac8%2C44847%2C1733574221858, suffix=, logDir=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/WALs/27c6fcd7dac8,44847,1733574221858, archiveDir=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/oldWALs, maxLogs=10 2024-12-07T12:23:41,992 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C44847%2C1733574221858.1733574221992 2024-12-07T12:23:41,996 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/WALs/27c6fcd7dac8,44847,1733574221858/27c6fcd7dac8%2C44847%2C1733574221858.1733574221992 2024-12-07T12:23:41,998 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37775:37775),(127.0.0.1/127.0.0.1:40849:40849)] 2024-12-07T12:23:42,003 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:23:42,003 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:23:42,003 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:23:42,003 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:23:42,005 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:23:42,006 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T12:23:42,006 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:23:42,006 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:23:42,006 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:23:42,007 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T12:23:42,007 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:23:42,008 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:23:42,008 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:23:42,009 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T12:23:42,009 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:23:42,009 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:23:42,009 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:23:42,010 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T12:23:42,010 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:23:42,010 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:23:42,011 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:23:42,011 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:23:42,011 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:23:42,013 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:23:42,013 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:23:42,013 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T12:23:42,014 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:23:42,016 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:23:42,016 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=714230, jitterRate=-0.09180977940559387}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T12:23:42,017 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733574222004Initializing all the Stores at 1733574222004Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574222004Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574222004Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574222004Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574222004Cleaning up temporary data from old regions at 1733574222013 (+9 ms)Region opened successfully at 1733574222017 (+4 ms) 2024-12-07T12:23:42,017 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-07T12:23:42,020 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c8a5398, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=27c6fcd7dac8/172.17.0.2:0 2024-12-07T12:23:42,021 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-07T12:23:42,021 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T12:23:42,021 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T12:23:42,021 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T12:23:42,021 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-07T12:23:42,022 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-07T12:23:42,022 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T12:23:42,024 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-07T12:23:42,024 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T12:23:42,025 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-07T12:23:42,026 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T12:23:42,026 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T12:23:42,027 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-07T12:23:42,028 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T12:23:42,028 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T12:23:42,029 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-07T12:23:42,030 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T12:23:42,034 DEBUG 
[master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T12:23:42,036 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T12:23:42,037 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T12:23:42,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T12:23:42,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1018ce14aa20001, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T12:23:42,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:23:42,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1018ce14aa20001, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:23:42,039 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=27c6fcd7dac8,44847,1733574221858, sessionid=0x1018ce14aa20000, setting cluster-up flag (Was=false) 2024-12-07T12:23:42,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1018ce14aa20001, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:23:42,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:23:42,046 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T12:23:42,047 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=27c6fcd7dac8,44847,1733574221858 2024-12-07T12:23:42,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1018ce14aa20001, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:23:42,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:23:42,056 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T12:23:42,057 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=27c6fcd7dac8,44847,1733574221858 2024-12-07T12:23:42,058 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-07T12:23:42,060 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-07T12:23:42,060 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-07T12:23:42,060 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-07T12:23:42,060 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 27c6fcd7dac8,44847,1733574221858 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T12:23:42,061 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:23:42,062 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:23:42,062 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:23:42,062 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:23:42,062 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/27c6fcd7dac8:0, corePoolSize=10, maxPoolSize=10 2024-12-07T12:23:42,062 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:23:42,062 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=2, maxPoolSize=2 2024-12-07T12:23:42,062 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=1, 
maxPoolSize=1 2024-12-07T12:23:42,062 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733574252062 2024-12-07T12:23:42,063 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-07T12:23:42,063 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T12:23:42,063 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T12:23:42,063 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T12:23:42,063 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T12:23:42,063 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T12:23:42,063 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T12:23:42,063 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T12:23:42,063 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T12:23:42,063 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T12:23:42,063 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T12:23:42,064 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-07T12:23:42,064 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T12:23:42,064 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T12:23:42,064 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.large.0-1733574222064,5,FailOnTimeoutGroup] 2024-12-07T12:23:42,064 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.small.0-1733574222064,5,FailOnTimeoutGroup] 2024-12-07T12:23:42,064 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T12:23:42,064 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-07T12:23:42,064 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-07T12:23:42,064 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-07T12:23:42,065 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:23:42,065 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T12:23:42,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741831_1007 (size=1321) 2024-12-07T12:23:42,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741831_1007 (size=1321) 2024-12-07T12:23:42,071 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-07T12:23:42,071 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52 2024-12-07T12:23:42,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741832_1008 (size=32) 2024-12-07T12:23:42,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741832_1008 (size=32) 2024-12-07T12:23:42,076 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:23:42,077 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T12:23:42,078 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T12:23:42,078 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:23:42,079 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:23:42,079 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T12:23:42,080 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T12:23:42,080 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:23:42,080 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:23:42,080 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T12:23:42,081 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T12:23:42,081 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:23:42,081 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:23:42,081 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T12:23:42,082 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T12:23:42,082 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:23:42,082 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:23:42,083 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T12:23:42,083 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/1588230740 2024-12-07T12:23:42,083 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/1588230740 2024-12-07T12:23:42,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T12:23:42,084 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T12:23:42,085 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-07T12:23:42,086 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T12:23:42,087 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:23:42,088 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=850217, jitterRate=0.08110694587230682}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T12:23:42,088 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733574222076Initializing all the Stores at 1733574222077 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574222077Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574222077Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574222077Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574222077Cleaning up temporary data from old regions at 1733574222084 (+7 ms)Region opened successfully at 1733574222088 (+4 ms) 2024-12-07T12:23:42,088 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T12:23:42,088 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T12:23:42,088 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T12:23:42,088 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T12:23:42,088 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T12:23:42,089 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T12:23:42,089 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733574222088Disabling compacts and flushes for region at 1733574222088Disabling writes for close at 1733574222088Writing region close 
event to WAL at 1733574222089 (+1 ms)Closed at 1733574222089 2024-12-07T12:23:42,090 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T12:23:42,090 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-07T12:23:42,090 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T12:23:42,091 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T12:23:42,092 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T12:23:42,124 INFO [RS:0;27c6fcd7dac8:37961 {}] regionserver.HRegionServer(746): ClusterId : 9d01219d-d558-44c9-9fff-e8cb71050135 2024-12-07T12:23:42,124 DEBUG [RS:0;27c6fcd7dac8:37961 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T12:23:42,126 DEBUG [RS:0;27c6fcd7dac8:37961 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T12:23:42,126 DEBUG [RS:0;27c6fcd7dac8:37961 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T12:23:42,128 DEBUG [RS:0;27c6fcd7dac8:37961 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T12:23:42,128 DEBUG [RS:0;27c6fcd7dac8:37961 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a6f5312, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=27c6fcd7dac8/172.17.0.2:0 2024-12-07T12:23:42,140 DEBUG [RS:0;27c6fcd7dac8:37961 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;27c6fcd7dac8:37961 2024-12-07T12:23:42,140 INFO [RS:0;27c6fcd7dac8:37961 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T12:23:42,140 INFO [RS:0;27c6fcd7dac8:37961 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T12:23:42,140 DEBUG [RS:0;27c6fcd7dac8:37961 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-07T12:23:42,141 INFO [RS:0;27c6fcd7dac8:37961 {}] regionserver.HRegionServer(2659): reportForDuty to master=27c6fcd7dac8,44847,1733574221858 with port=37961, startcode=1733574221909 2024-12-07T12:23:42,141 DEBUG [RS:0;27c6fcd7dac8:37961 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T12:23:42,143 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59207, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T12:23:42,143 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44847 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 27c6fcd7dac8,37961,1733574221909 2024-12-07T12:23:42,144 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44847 {}] master.ServerManager(517): Registering regionserver=27c6fcd7dac8,37961,1733574221909 2024-12-07T12:23:42,145 DEBUG [RS:0;27c6fcd7dac8:37961 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52 2024-12-07T12:23:42,145 DEBUG [RS:0;27c6fcd7dac8:37961 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42271 2024-12-07T12:23:42,145 DEBUG [RS:0;27c6fcd7dac8:37961 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T12:23:42,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T12:23:42,147 DEBUG [RS:0;27c6fcd7dac8:37961 {}] zookeeper.ZKUtil(111): regionserver:37961-0x1018ce14aa20001, quorum=127.0.0.1:55472, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/27c6fcd7dac8,37961,1733574221909 2024-12-07T12:23:42,147 WARN [RS:0;27c6fcd7dac8:37961 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T12:23:42,147 INFO [RS:0;27c6fcd7dac8:37961 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T12:23:42,147 DEBUG [RS:0;27c6fcd7dac8:37961 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/WALs/27c6fcd7dac8,37961,1733574221909 2024-12-07T12:23:42,147 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [27c6fcd7dac8,37961,1733574221909] 2024-12-07T12:23:42,151 INFO [RS:0;27c6fcd7dac8:37961 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T12:23:42,152 INFO [RS:0;27c6fcd7dac8:37961 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T12:23:42,152 INFO [RS:0;27c6fcd7dac8:37961 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T12:23:42,152 INFO [RS:0;27c6fcd7dac8:37961 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-07T12:23:42,153 INFO [RS:0;27c6fcd7dac8:37961 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T12:23:42,154 INFO [RS:0;27c6fcd7dac8:37961 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T12:23:42,154 INFO [RS:0;27c6fcd7dac8:37961 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T12:23:42,154 DEBUG [RS:0;27c6fcd7dac8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:23:42,154 DEBUG [RS:0;27c6fcd7dac8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:23:42,154 DEBUG [RS:0;27c6fcd7dac8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:23:42,154 DEBUG [RS:0;27c6fcd7dac8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:23:42,154 DEBUG [RS:0;27c6fcd7dac8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:23:42,154 DEBUG [RS:0;27c6fcd7dac8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/27c6fcd7dac8:0, corePoolSize=2, maxPoolSize=2 2024-12-07T12:23:42,154 DEBUG [RS:0;27c6fcd7dac8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:23:42,154 DEBUG [RS:0;27c6fcd7dac8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:23:42,154 DEBUG [RS:0;27c6fcd7dac8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:23:42,154 DEBUG [RS:0;27c6fcd7dac8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:23:42,154 DEBUG [RS:0;27c6fcd7dac8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:23:42,154 DEBUG [RS:0;27c6fcd7dac8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:23:42,154 DEBUG [RS:0;27c6fcd7dac8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/27c6fcd7dac8:0, corePoolSize=3, maxPoolSize=3 2024-12-07T12:23:42,154 DEBUG [RS:0;27c6fcd7dac8:37961 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0, corePoolSize=3, maxPoolSize=3 2024-12-07T12:23:42,155 INFO [RS:0;27c6fcd7dac8:37961 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
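The ChoreService lines above enable periodic background tasks such as CompactionChecker with period=1000 ms. A small sketch of the ScheduledChore pattern those messages come from; ScheduledChore, ChoreService and Stoppable are HBase-internal types, and the chore body here (a counter) is invented purely for illustration.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      // Minimal Stoppable so the example is self-contained.
      static final class SimpleStopper implements Stoppable {
        private volatile boolean stopped;
        @Override public void stop(String why) { stopped = true; }
        @Override public boolean isStopped() { return stopped; }
      }

      // A trivial chore with the same 1000 ms period as the CompactionChecker above.
      static final class CounterChore extends ScheduledChore {
        private int ticks;
        CounterChore(Stoppable stopper) {
          super("CounterChore", stopper, 1000);  // name, stopper, period in milliseconds
        }
        @Override protected void chore() {
          ticks++;  // real chores inspect stores, memstore sizes, executors, etc.
        }
      }

      public static void main(String[] args) {
        ChoreService service = new ChoreService("sketch");
        service.scheduleChore(new CounterChore(new SimpleStopper()));
        // service.shutdown() would stop the pool once the chore is no longer needed.
      }
    }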
2024-12-07T12:23:42,155 INFO [RS:0;27c6fcd7dac8:37961 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T12:23:42,155 INFO [RS:0;27c6fcd7dac8:37961 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:23:42,156 INFO [RS:0;27c6fcd7dac8:37961 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T12:23:42,156 INFO [RS:0;27c6fcd7dac8:37961 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T12:23:42,156 INFO [RS:0;27c6fcd7dac8:37961 {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,37961,1733574221909-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T12:23:42,171 INFO [RS:0;27c6fcd7dac8:37961 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T12:23:42,171 INFO [RS:0;27c6fcd7dac8:37961 {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,37961,1733574221909-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:23:42,171 INFO [RS:0;27c6fcd7dac8:37961 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:23:42,171 INFO [RS:0;27c6fcd7dac8:37961 {}] regionserver.Replication(171): 27c6fcd7dac8,37961,1733574221909 started 2024-12-07T12:23:42,185 INFO [RS:0;27c6fcd7dac8:37961 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:23:42,185 INFO [RS:0;27c6fcd7dac8:37961 {}] regionserver.HRegionServer(1482): Serving as 27c6fcd7dac8,37961,1733574221909, RpcServer on 27c6fcd7dac8/172.17.0.2:37961, sessionid=0x1018ce14aa20001 2024-12-07T12:23:42,185 DEBUG [RS:0;27c6fcd7dac8:37961 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T12:23:42,185 DEBUG [RS:0;27c6fcd7dac8:37961 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 27c6fcd7dac8,37961,1733574221909 2024-12-07T12:23:42,185 DEBUG [RS:0;27c6fcd7dac8:37961 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '27c6fcd7dac8,37961,1733574221909' 2024-12-07T12:23:42,185 DEBUG [RS:0;27c6fcd7dac8:37961 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T12:23:42,185 DEBUG [RS:0;27c6fcd7dac8:37961 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T12:23:42,186 DEBUG [RS:0;27c6fcd7dac8:37961 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T12:23:42,186 DEBUG [RS:0;27c6fcd7dac8:37961 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T12:23:42,186 DEBUG [RS:0;27c6fcd7dac8:37961 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 27c6fcd7dac8,37961,1733574221909 2024-12-07T12:23:42,186 DEBUG [RS:0;27c6fcd7dac8:37961 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '27c6fcd7dac8,37961,1733574221909' 2024-12-07T12:23:42,186 DEBUG [RS:0;27c6fcd7dac8:37961 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T12:23:42,186 DEBUG 
[RS:0;27c6fcd7dac8:37961 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T12:23:42,186 DEBUG [RS:0;27c6fcd7dac8:37961 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T12:23:42,186 INFO [RS:0;27c6fcd7dac8:37961 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T12:23:42,186 INFO [RS:0;27c6fcd7dac8:37961 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T12:23:42,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:42,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:42,242 WARN [27c6fcd7dac8:44847 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
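The two "Failed invocation" WARNs above are reported as InvocationTargetException because RecoverLeaseFSUtils calls isFileClosed reflectively; the real failure is the nested "Filesystem closed" IOException from the DFS client of the earlier, already shut-down mini cluster. A self-contained sketch (pure JDK, no HBase or HDFS types) of how Method.invoke wraps a checked exception in exactly this way:

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class ReflectiveWrapDemo {
      // Stand-in for DistributedFileSystem.isFileClosed on a closed client.
      public boolean isFileClosed(String path) throws IOException {
        throw new IOException("Filesystem closed");
      }

      public static void main(String[] args) throws Exception {
        Method m = ReflectiveWrapDemo.class.getMethod("isFileClosed", String.class);
        try {
          m.invoke(new ReflectiveWrapDemo(), "/some/wal");
        } catch (InvocationTargetException e) {
          // What gets logged is the wrapper; the real IOException is its cause.
          System.out.println(e + " caused by " + e.getCause());
        }
      }
    }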
2024-12-07T12:23:42,288 INFO [RS:0;27c6fcd7dac8:37961 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=27c6fcd7dac8%2C37961%2C1733574221909, suffix=, logDir=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/WALs/27c6fcd7dac8,37961,1733574221909, archiveDir=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/oldWALs, maxLogs=32 2024-12-07T12:23:42,289 INFO [RS:0;27c6fcd7dac8:37961 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C37961%2C1733574221909.1733574222288 2024-12-07T12:23:42,294 INFO [RS:0;27c6fcd7dac8:37961 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/WALs/27c6fcd7dac8,37961,1733574221909/27c6fcd7dac8%2C37961%2C1733574221909.1733574222288 2024-12-07T12:23:42,295 DEBUG [RS:0;27c6fcd7dac8:37961 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40849:40849),(127.0.0.1/127.0.0.1:37775:37775)] 2024-12-07T12:23:42,492 DEBUG [27c6fcd7dac8:44847 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-07T12:23:42,493 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=27c6fcd7dac8,37961,1733574221909 2024-12-07T12:23:42,494 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 27c6fcd7dac8,37961,1733574221909, state=OPENING 2024-12-07T12:23:42,498 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T12:23:42,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:23:42,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1018ce14aa20001, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:23:42,499 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T12:23:42,499 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:23:42,499 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:23:42,500 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=27c6fcd7dac8,37961,1733574221909}] 2024-12-07T12:23:42,652 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T12:23:42,655 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41983, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T12:23:42,659 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-07T12:23:42,659 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T12:23:42,660 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=27c6fcd7dac8%2C37961%2C1733574221909.meta, suffix=.meta, logDir=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/WALs/27c6fcd7dac8,37961,1733574221909, archiveDir=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/oldWALs, maxLogs=32 2024-12-07T12:23:42,661 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C37961%2C1733574221909.meta.1733574222661.meta 2024-12-07T12:23:42,666 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/WALs/27c6fcd7dac8,37961,1733574221909/27c6fcd7dac8%2C37961%2C1733574221909.meta.1733574222661.meta 2024-12-07T12:23:42,666 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37775:37775),(127.0.0.1/127.0.0.1:40849:40849)] 2024-12-07T12:23:42,667 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:23:42,667 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T12:23:42,668 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T12:23:42,668 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
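Both WAL instances created above (the region server WAL and the meta WAL) report blocksize=256 MB and rollsize=128 MB; the roll size is the block size scaled by a multiplier of 0.5. A minimal sketch of that relationship; the key names hbase.regionserver.hlog.blocksize and hbase.regionserver.logroll.multiplier are assumptions based on common HBase configuration, and the defaults below are chosen only to reproduce the values seen in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollSizeSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        long blockSize   = conf.getLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        long rollSize    = (long) (blockSize * multiplier);  // 256 MB * 0.5 = 128 MB
        System.out.printf("blocksize=%d, rollsize=%d%n", blockSize, rollSize);
      }
    }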
2024-12-07T12:23:42,668 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T12:23:42,668 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:23:42,668 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-07T12:23:42,668 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-07T12:23:42,669 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T12:23:42,670 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T12:23:42,670 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:23:42,670 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:23:42,670 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T12:23:42,671 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T12:23:42,671 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:23:42,671 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:23:42,672 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T12:23:42,672 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T12:23:42,672 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:23:42,673 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:23:42,673 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T12:23:42,673 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T12:23:42,673 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:23:42,674 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
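Each store opened above is configured with minFilesToCompact=3, maxFilesToCompact=10 and ratio 1.2 under ExploringCompactionPolicy. A simplified sketch of the size-ratio test that policy family applies when judging a candidate selection: a file qualifies if it is no larger than ratio times the combined size of the other selected files. This is an illustration of the rule, not the actual HBase implementation.

    import java.util.List;

    public class RatioCheckSketch {
      // Simplified ratio test: file i fits the selection if
      // size(i) <= ratio * sum(sizes of the other selected files).
      static boolean fitsRatio(List<Long> sizes, int i, double ratio) {
        long others = 0;
        for (int j = 0; j < sizes.size(); j++) {
          if (j != i) others += sizes.get(j);
        }
        return sizes.get(i) <= ratio * others;
      }

      public static void main(String[] args) {
        List<Long> sizes = List.of(10L, 12L, 11L, 100L);
        System.out.println(fitsRatio(sizes, 0, 1.2)); // true: 10 <= 1.2 * 123
        System.out.println(fitsRatio(sizes, 3, 1.2)); // false: 100 > 1.2 * 33
      }
    }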
2024-12-07T12:23:42,674 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T12:23:42,674 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/1588230740 2024-12-07T12:23:42,675 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/1588230740 2024-12-07T12:23:42,676 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T12:23:42,676 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T12:23:42,677 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T12:23:42,678 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T12:23:42,678 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=694394, jitterRate=-0.11703258752822876}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T12:23:42,678 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-07T12:23:42,679 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733574222668Writing region info on filesystem at 1733574222668Initializing all the Stores at 1733574222669 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574222669Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574222669Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574222669Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574222669Cleaning up temporary data from old regions at 1733574222676 (+7 ms)Running coprocessor post-open hooks at 1733574222678 (+2 ms)Region opened successfully at 1733574222679 (+1 ms) 2024-12-07T12:23:42,680 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733574222652 2024-12-07T12:23:42,682 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T12:23:42,682 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-07T12:23:42,683 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=27c6fcd7dac8,37961,1733574221909 2024-12-07T12:23:42,683 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 27c6fcd7dac8,37961,1733574221909, state=OPEN 2024-12-07T12:23:42,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T12:23:42,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1018ce14aa20001, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T12:23:42,687 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=27c6fcd7dac8,37961,1733574221909 2024-12-07T12:23:42,687 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:23:42,687 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:23:42,690 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-07T12:23:42,690 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=27c6fcd7dac8,37961,1733574221909 in 188 msec 2024-12-07T12:23:42,692 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-07T12:23:42,692 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 600 msec 2024-12-07T12:23:42,693 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T12:23:42,693 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-07T12:23:42,694 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T12:23:42,694 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=27c6fcd7dac8,37961,1733574221909, seqNum=-1] 2024-12-07T12:23:42,694 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T12:23:42,696 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39801, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T12:23:42,701 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 640 msec 2024-12-07T12:23:42,701 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733574222701, completionTime=-1 2024-12-07T12:23:42,701 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-07T12:23:42,701 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-07T12:23:42,702 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-07T12:23:42,702 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733574282702 2024-12-07T12:23:42,702 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733574342702 2024-12-07T12:23:42,702 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-07T12:23:42,703 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,44847,1733574221858-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:23:42,703 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,44847,1733574221858-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:23:42,703 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,44847,1733574221858-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:23:42,703 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-27c6fcd7dac8:44847, period=300000, unit=MILLISECONDS is enabled. 
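InitMetaProcedure above creates the 'default' and 'hbase' namespaces internally as part of master startup. For comparison, a user namespace is normally created through the client Admin API; a minimal sketch, assuming a reachable cluster in the local configuration (the namespace name "demo_ns" is invented for the example):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CreateNamespaceSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Invented namespace; 'default' and 'hbase' already exist after InitMetaProcedure.
          admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
        }
      }
    }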
2024-12-07T12:23:42,703 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T12:23:42,703 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-07T12:23:42,704 DEBUG [master/27c6fcd7dac8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-07T12:23:42,706 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.766sec 2024-12-07T12:23:42,706 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T12:23:42,706 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T12:23:42,706 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T12:23:42,706 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-07T12:23:42,707 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T12:23:42,707 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,44847,1733574221858-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T12:23:42,707 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,44847,1733574221858-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T12:23:42,709 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-07T12:23:42,709 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T12:23:42,709 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,44847,1733574221858-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T12:23:42,725 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d663ceb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T12:23:42,725 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 27c6fcd7dac8,44847,-1 for getting cluster id 2024-12-07T12:23:42,725 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-07T12:23:42,726 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9d01219d-d558-44c9-9fff-e8cb71050135' 2024-12-07T12:23:42,727 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-07T12:23:42,727 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9d01219d-d558-44c9-9fff-e8cb71050135" 2024-12-07T12:23:42,727 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33d4bc1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T12:23:42,727 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [27c6fcd7dac8,44847,-1] 2024-12-07T12:23:42,727 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-07T12:23:42,727 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:23:42,728 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37104, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-07T12:23:42,729 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@651a5cab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T12:23:42,729 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T12:23:42,730 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=27c6fcd7dac8,37961,1733574221909, seqNum=-1] 2024-12-07T12:23:42,731 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T12:23:42,731 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35016, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T12:23:42,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=27c6fcd7dac8,44847,1733574221858 2024-12-07T12:23:42,733 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:23:42,736 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-07T12:23:42,737 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-07T12:23:42,737 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 27c6fcd7dac8,44847,1733574221858 2024-12-07T12:23:42,738 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@34648aa 2024-12-07T12:23:42,738 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T12:23:42,739 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37110, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T12:23:42,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44847 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-07T12:23:42,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44847 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-07T12:23:42,740 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44847 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T12:23:42,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44847 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-12-07T12:23:42,742 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T12:23:42,742 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:23:42,742 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44847 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-12-07T12:23:42,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44847 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T12:23:42,743 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T12:23:42,749 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741835_1011 (size=381) 2024-12-07T12:23:42,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741835_1011 (size=381) 2024-12-07T12:23:42,752 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 613515ffd88b0d443fd5e64f291ab31e, NAME => 'TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52 2024-12-07T12:23:42,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741836_1012 (size=64) 2024-12-07T12:23:42,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741836_1012 (size=64) 2024-12-07T12:23:42,758 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:23:42,758 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 613515ffd88b0d443fd5e64f291ab31e, disabling compactions & flushes 2024-12-07T12:23:42,758 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e. 2024-12-07T12:23:42,758 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e. 2024-12-07T12:23:42,758 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e. after waiting 0 ms 2024-12-07T12:23:42,758 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e. 2024-12-07T12:23:42,758 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e. 
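The create request above (table 'TestLogRolling-testLogRolling' with a single 'info' family, VERSIONS=1, BLOOMFILTER=ROW) also triggers the TableDescriptorChecker warnings because the test descriptor carries a deliberately tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) to force frequent flushes, rolls and splits. A hedged sketch of building an equivalent descriptor with the client API; this illustrates the request seen in the log, not the test's actual code.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)
                  .setBloomFilterType(BloomType.ROW)
                  .build())
              .setMaxFileSize(786432L)      // deliberately small: triggers the MAX_FILESIZE warning
              .setMemStoreFlushSize(8192L)  // deliberately small: triggers the MEMSTORE_FLUSHSIZE warning
              .build());
        }
      }
    }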
2024-12-07T12:23:42,758 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 613515ffd88b0d443fd5e64f291ab31e: Waiting for close lock at 1733574222758Disabling compacts and flushes for region at 1733574222758Disabling writes for close at 1733574222758Writing region close event to WAL at 1733574222758Closed at 1733574222758 2024-12-07T12:23:42,760 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T12:23:42,760 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733574222760"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733574222760"}]},"ts":"1733574222760"} 2024-12-07T12:23:42,762 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-07T12:23:42,763 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T12:23:42,763 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733574222763"}]},"ts":"1733574222763"} 2024-12-07T12:23:42,765 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-12-07T12:23:42,765 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=613515ffd88b0d443fd5e64f291ab31e, ASSIGN}] 2024-12-07T12:23:42,767 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=613515ffd88b0d443fd5e64f291ab31e, ASSIGN 2024-12-07T12:23:42,767 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=613515ffd88b0d443fd5e64f291ab31e, ASSIGN; state=OFFLINE, location=27c6fcd7dac8,37961,1733574221909; forceNewPlan=false, retain=false 2024-12-07T12:23:42,918 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=613515ffd88b0d443fd5e64f291ab31e, regionState=OPENING, regionLocation=27c6fcd7dac8,37961,1733574221909 2024-12-07T12:23:42,920 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=613515ffd88b0d443fd5e64f291ab31e, ASSIGN because future has completed 2024-12-07T12:23:42,921 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 613515ffd88b0d443fd5e64f291ab31e, 
server=27c6fcd7dac8,37961,1733574221909}] 2024-12-07T12:23:43,077 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e. 2024-12-07T12:23:43,077 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 613515ffd88b0d443fd5e64f291ab31e, NAME => 'TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:23:43,078 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:23:43,078 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:23:43,078 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:23:43,078 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:23:43,079 INFO [StoreOpener-613515ffd88b0d443fd5e64f291ab31e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:23:43,080 INFO [StoreOpener-613515ffd88b0d443fd5e64f291ab31e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 613515ffd88b0d443fd5e64f291ab31e columnFamilyName info 2024-12-07T12:23:43,080 DEBUG [StoreOpener-613515ffd88b0d443fd5e64f291ab31e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:23:43,081 INFO [StoreOpener-613515ffd88b0d443fd5e64f291ab31e-1 {}] regionserver.HStore(327): Store=613515ffd88b0d443fd5e64f291ab31e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:23:43,081 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:23:43,081 DEBUG 
[RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:23:43,082 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:23:43,082 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:23:43,082 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:23:43,083 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:23:43,085 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:23:43,085 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 613515ffd88b0d443fd5e64f291ab31e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=767612, jitterRate=-0.023931920528411865}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T12:23:43,085 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:23:43,086 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 613515ffd88b0d443fd5e64f291ab31e: Running coprocessor pre-open hook at 1733574223078Writing region info on filesystem at 1733574223078Initializing all the Stores at 1733574223079 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574223079Cleaning up temporary data from old regions at 1733574223082 (+3 ms)Running coprocessor post-open hooks at 1733574223085 (+3 ms)Region opened successfully at 1733574223086 (+1 ms) 2024-12-07T12:23:43,087 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e., pid=6, masterSystemTime=1733574223074 2024-12-07T12:23:43,089 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e. 2024-12-07T12:23:43,089 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e. 2024-12-07T12:23:43,090 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=613515ffd88b0d443fd5e64f291ab31e, regionState=OPEN, openSeqNum=2, regionLocation=27c6fcd7dac8,37961,1733574221909 2024-12-07T12:23:43,092 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 613515ffd88b0d443fd5e64f291ab31e, server=27c6fcd7dac8,37961,1733574221909 because future has completed 2024-12-07T12:23:43,095 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-07T12:23:43,095 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 613515ffd88b0d443fd5e64f291ab31e, server=27c6fcd7dac8,37961,1733574221909 in 172 msec 2024-12-07T12:23:43,097 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-07T12:23:43,098 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=613515ffd88b0d443fd5e64f291ab31e, ASSIGN in 330 msec 2024-12-07T12:23:43,098 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T12:23:43,099 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733574223098"}]},"ts":"1733574223098"} 2024-12-07T12:23:43,100 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-12-07T12:23:43,101 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T12:23:43,103 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 361 msec 2024-12-07T12:23:43,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:43,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:44,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:44,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:45,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:45,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:45,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,271 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,271 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,285 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,286 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,286 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,286 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,286 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,286 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,289 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,289 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,289 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,291 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,795 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-07T12:23:45,795 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,796 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,796 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,796 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,796 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,797 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,797 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,797 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,816 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,817 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,817 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,817 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,817 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,818 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,821 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,821 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,821 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:45,824 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:46,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:46,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:47,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:23:47,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:48,151 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-07T12:23:48,151 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-12-07T12:23:48,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:48,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:48,276 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-07T12:23:48,276 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-07T12:23:48,277 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-07T12:23:49,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:23:49,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:50,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:50,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:23:51,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:51,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:52,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:23:52,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:52,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44847 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T12:23:52,806 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-12-07T12:23:52,807 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-12-07T12:23:52,809 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-12-07T12:23:52,809 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e. 
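[Editor's note] The Close-WAL-Writer-0 warnings above recur roughly once per second because the WAL close path keeps probing whether the old writer's file is closed, while the filesystem client behind those hdfs://localhost:45085 paths has already been shut down, so every probe fails with "java.io.IOException: Filesystem closed". The paths involved (servers 46855 and 37667 under port 45085) differ from the cluster hosting TestLogRolling-testLogRolling (port 42271, server 37961), which suggests the probes belong to an earlier, already-stopped mini-cluster and are expected noise here. Below is a minimal sketch of that probe-and-retry pattern, written against the visible stack traces; it is not HBase's RecoverLeaseFSUtils itself, and the class name LeaseRecoveryProbe, the timeout parameter, and the one-second interval are illustrative assumptions.

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class LeaseRecoveryProbe {
      // Probe fs.isFileClosed(wal) about once per second until it reports true or the
      // timeout expires. Mirrors the retry visible in the Close-WAL-Writer-0 warnings.
      public static boolean waitUntilClosed(FileSystem fs, Path wal, long timeoutMs)
          throws InterruptedException {
        final Method isFileClosed;
        try {
          // DistributedFileSystem#isFileClosed(Path) is resolved reflectively, as in
          // the stack traces above, so this also loads against filesystems without it.
          isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
        } catch (NoSuchMethodException e) {
          return false; // not an HDFS client; nothing to probe
        }
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
          try {
            if (Boolean.TRUE.equals(isFileClosed.invoke(fs, wal))) {
              return true; // the NameNode reports the old WAL file as closed
            }
          } catch (IllegalAccessException e) {
            return false;
          } catch (InvocationTargetException e) {
            // This is the case logged above: the wrapped cause is
            // java.io.IOException: Filesystem closed, because the DFSClient behind
            // this FileSystem was already shut down; warn and keep retrying.
            if (!(e.getCause() instanceof IOException)) {
              return false;
            }
          }
          Thread.sleep(1000L); // the warnings above recur roughly every second
        }
        return false;
      }
    }

Once the probe's timeout elapses (or the test JVM exits), the retries simply stop; no data on the active cluster is affected, which is consistent with table creation and the subsequent flush completing normally in the entries that follow.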
2024-12-07T12:23:52,812 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e., hostname=27c6fcd7dac8,37961,1733574221909, seqNum=2] 2024-12-07T12:23:52,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8855): Flush requested on 613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:23:52,823 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 613515ffd88b0d443fd5e64f291ab31e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-07T12:23:52,843 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/5ca8af0f64c248aa81991ba608a9ed7f is 1080, key is row0001/info:/1733574232813/Put/seqid=0 2024-12-07T12:23:52,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741837_1013 (size=12509) 2024-12-07T12:23:52,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741837_1013 (size=12509) 2024-12-07T12:23:52,848 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/5ca8af0f64c248aa81991ba608a9ed7f 2024-12-07T12:23:52,855 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/5ca8af0f64c248aa81991ba608a9ed7f as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/5ca8af0f64c248aa81991ba608a9ed7f 2024-12-07T12:23:52,859 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=613515ffd88b0d443fd5e64f291ab31e, server=27c6fcd7dac8,37961,1733574221909 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-07T12:23:52,861 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/5ca8af0f64c248aa81991ba608a9ed7f, entries=7, sequenceid=11, filesize=12.2 K 2024-12-07T12:23:52,861 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 613515ffd88b0d443fd5e64f291ab31e in 38ms, sequenceid=11, compaction requested=false 2024-12-07T12:23:52,862 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 613515ffd88b0d443fd5e64f291ab31e: 2024-12-07T12:23:52,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:35016 deadline: 1733574242859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=613515ffd88b0d443fd5e64f291ab31e, server=27c6fcd7dac8,37961,1733574221909 2024-12-07T12:23:52,883 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e., hostname=27c6fcd7dac8,37961,1733574221909, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e., hostname=27c6fcd7dac8,37961,1733574221909, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=613515ffd88b0d443fd5e64f291ab31e, server=27c6fcd7dac8,37961,1733574221909 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-07T12:23:52,884 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e., hostname=27c6fcd7dac8,37961,1733574221909, seqNum=2 is 
org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=613515ffd88b0d443fd5e64f291ab31e, server=27c6fcd7dac8,37961,1733574221909 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-07T12:23:52,884 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e., hostname=27c6fcd7dac8,37961,1733574221909, seqNum=2 because the exception is null or not the one we care about 2024-12-07T12:23:53,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:53,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:53,779 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-07T12:23:53,780 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:53,780 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:53,780 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:53,781 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:53,781 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:53,781 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:53,781 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:53,782 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:53,804 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:53,804 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:53,805 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:53,805 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:53,805 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:53,805 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:53,809 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:53,809 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:53,809 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:53,812 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:23:54,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:54,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:55,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:55,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:56,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:56,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:57,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:57,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:58,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:58,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:59,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:23:59,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:00,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:00,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:01,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:01,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:02,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:02,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-07T12:24:02,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8855): Flush requested on 613515ffd88b0d443fd5e64f291ab31e
2024-12-07T12:24:02,958 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 613515ffd88b0d443fd5e64f291ab31e 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-12-07T12:24:02,963 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/cc122be8c7174af7a20743912f146e5f is 1080, key is row0008/info:/1733574232824/Put/seqid=0
2024-12-07T12:24:02,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741838_1014 (size=29761)
2024-12-07T12:24:02,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741838_1014 (size=29761)
2024-12-07T12:24:02,969 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/cc122be8c7174af7a20743912f146e5f
2024-12-07T12:24:02,975 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/cc122be8c7174af7a20743912f146e5f as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/cc122be8c7174af7a20743912f146e5f
2024-12-07T12:24:02,979 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/cc122be8c7174af7a20743912f146e5f, entries=23, sequenceid=37, filesize=29.1 K
2024-12-07T12:24:02,980 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 613515ffd88b0d443fd5e64f291ab31e in 22ms, sequenceid=37, compaction requested=false
2024-12-07T12:24:02,981 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 613515ffd88b0d443fd5e64f291ab31e:
2024-12-07T12:24:02,981 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K
2024-12-07T12:24:02,981 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-07T12:24:02,981 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/cc122be8c7174af7a20743912f146e5f because midkey is the same as first or last row
2024-12-07T12:24:03,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for
hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:03,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:04,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:24:04,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-07T12:24:04,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8855): Flush requested on 613515ffd88b0d443fd5e64f291ab31e
2024-12-07T12:24:04,970 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 613515ffd88b0d443fd5e64f291ab31e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-07T12:24:04,974 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/a59ceddf7d8c47a4b40524eba44f3320 is 1080, key is row0031/info:/1733574242959/Put/seqid=0
2024-12-07T12:24:04,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741839_1015 (size=12509)
2024-12-07T12:24:04,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741839_1015 (size=12509)
2024-12-07T12:24:04,982 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/a59ceddf7d8c47a4b40524eba44f3320
2024-12-07T12:24:04,988 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/a59ceddf7d8c47a4b40524eba44f3320 as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/a59ceddf7d8c47a4b40524eba44f3320
2024-12-07T12:24:04,993 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/a59ceddf7d8c47a4b40524eba44f3320, entries=7, sequenceid=47, filesize=12.2 K
2024-12-07T12:24:04,994 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=15.76 KB/16140 for 613515ffd88b0d443fd5e64f291ab31e in 24ms, sequenceid=47, compaction requested=true
2024-12-07T12:24:04,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 613515ffd88b0d443fd5e64f291ab31e:
2024-12-07T12:24:04,994 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K
2024-12-07T12:24:04,994 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-07T12:24:04,994 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/cc122be8c7174af7a20743912f146e5f because midkey is the same as first or last row
2024-12-07T12:24:04,994 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 613515ffd88b0d443fd5e64f291ab31e:info, priority=-2147483648, current under compaction store size is 1
2024-12-07T12:24:04,994 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-07T12:24:04,994 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-07T12:24:04,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8855): Flush requested on 613515ffd88b0d443fd5e64f291ab31e
2024-12-07T12:24:04,995 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 613515ffd88b0d443fd5e64f291ab31e 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB
2024-12-07T12:24:04,996 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-07T12:24:04,996 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1541): 613515ffd88b0d443fd5e64f291ab31e/info is initiating minor compaction (all files)
2024-12-07T12:24:04,996 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 613515ffd88b0d443fd5e64f291ab31e/info in TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e.
2024-12-07T12:24:04,996 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/5ca8af0f64c248aa81991ba608a9ed7f, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/cc122be8c7174af7a20743912f146e5f, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/a59ceddf7d8c47a4b40524eba44f3320] into tmpdir=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp, totalSize=53.5 K
2024-12-07T12:24:04,996 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5ca8af0f64c248aa81991ba608a9ed7f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733574232813
2024-12-07T12:24:04,997 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.Compactor(225): Compacting cc122be8c7174af7a20743912f146e5f, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733574232824
2024-12-07T12:24:04,997 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.Compactor(225): Compacting a59ceddf7d8c47a4b40524eba44f3320, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733574242959
2024-12-07T12:24:04,999 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/d5f3999e662446eba8cef87fa0f6b14c is 1080, key is row0038/info:/1733574244971/Put/seqid=0
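The compaction-selection lines just above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" and "Exploring compaction algorithm has selected 3 files of size 54779") come from HBase's size-ratio based candidate selection. The sketch below only illustrates the ratio test those messages refer to; it is not the HBase implementation, the 1.2 value is assumed here as the commonly used default compaction ratio, and the three sizes are the store-file sizes visible in this log (12509 + 29761 + 12509 = 54779 bytes).

    import java.util.List;

    // Illustrative sketch of a size-ratio compaction check, not HBase code.
    // A file stays in the candidate set only if it is not much larger than the
    // other candidates combined (their summed size times the ratio).
    public class RatioCheckSketch {
        static boolean withinRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false; // this file is too big relative to the rest
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Sizes taken from this log: 5ca8af0f... (12509), cc122be8... (29761), a59ceddf... (12509).
            // All three pass the test, matching the "selected 3 files of size 54779" line.
            System.out.println(withinRatio(List.of(12509L, 29761L, 12509L), 1.2)); // true
        }
    }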
2024-12-07T12:24:05,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741840_1016 (size=22222) 2024-12-07T12:24:05,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741840_1016 (size=22222) 2024-12-07T12:24:05,008 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/d5f3999e662446eba8cef87fa0f6b14c 2024-12-07T12:24:05,014 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 613515ffd88b0d443fd5e64f291ab31e#info#compaction#61 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T12:24:05,014 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/b9450271d6ad4680a9f6d5dd7e44e05f is 1080, key is row0001/info:/1733574232813/Put/seqid=0 2024-12-07T12:24:05,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/d5f3999e662446eba8cef87fa0f6b14c as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/d5f3999e662446eba8cef87fa0f6b14c 2024-12-07T12:24:05,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741841_1017 (size=44978) 2024-12-07T12:24:05,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741841_1017 (size=44978) 2024-12-07T12:24:05,020 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/d5f3999e662446eba8cef87fa0f6b14c, entries=16, sequenceid=66, filesize=21.7 K 2024-12-07T12:24:05,021 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=11.56 KB/11836 for 613515ffd88b0d443fd5e64f291ab31e in 25ms, sequenceid=66, compaction requested=false 2024-12-07T12:24:05,021 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 613515ffd88b0d443fd5e64f291ab31e: 2024-12-07T12:24:05,021 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=75.2 K, sizeToCheck=16.0 K 2024-12-07T12:24:05,021 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:24:05,021 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/cc122be8c7174af7a20743912f146e5f because midkey is the same as first or last row 2024-12-07T12:24:05,025 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/b9450271d6ad4680a9f6d5dd7e44e05f as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/b9450271d6ad4680a9f6d5dd7e44e05f 2024-12-07T12:24:05,030 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 613515ffd88b0d443fd5e64f291ab31e/info of 613515ffd88b0d443fd5e64f291ab31e into b9450271d6ad4680a9f6d5dd7e44e05f(size=43.9 K), total size for store is 65.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T12:24:05,030 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 613515ffd88b0d443fd5e64f291ab31e: 2024-12-07T12:24:05,030 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e., storeName=613515ffd88b0d443fd5e64f291ab31e/info, priority=13, startTime=1733574244994; duration=0sec 2024-12-07T12:24:05,030 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.6 K, sizeToCheck=16.0 K 2024-12-07T12:24:05,030 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:24:05,030 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/b9450271d6ad4680a9f6d5dd7e44e05f because midkey is the same as first or last row 2024-12-07T12:24:05,030 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.6 K, sizeToCheck=16.0 K 2024-12-07T12:24:05,030 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:24:05,030 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/b9450271d6ad4680a9f6d5dd7e44e05f because midkey is the same as first or last row 2024-12-07T12:24:05,031 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.6 K, sizeToCheck=16.0 K 2024-12-07T12:24:05,031 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:24:05,031 DEBUG 
[RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/b9450271d6ad4680a9f6d5dd7e44e05f because midkey is the same as first or last row 2024-12-07T12:24:05,031 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T12:24:05,031 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 613515ffd88b0d443fd5e64f291ab31e:info 2024-12-07T12:24:05,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:05,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:06,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:06,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:24:07,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8855): Flush requested on 613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:24:07,017 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 613515ffd88b0d443fd5e64f291ab31e 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-07T12:24:07,022 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/6707b2540feb45fb940663480c2dbb6b is 1080, key is row0054/info:/1733574244996/Put/seqid=0 2024-12-07T12:24:07,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741842_1018 (size=17894) 2024-12-07T12:24:07,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741842_1018 (size=17894) 2024-12-07T12:24:07,028 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/6707b2540feb45fb940663480c2dbb6b 2024-12-07T12:24:07,033 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/6707b2540feb45fb940663480c2dbb6b as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/6707b2540feb45fb940663480c2dbb6b 2024-12-07T12:24:07,038 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/6707b2540feb45fb940663480c2dbb6b, entries=12, sequenceid=82, filesize=17.5 K 2024-12-07T12:24:07,039 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=14.71 KB/15064 for 613515ffd88b0d443fd5e64f291ab31e in 22ms, sequenceid=82, compaction requested=true 2024-12-07T12:24:07,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 613515ffd88b0d443fd5e64f291ab31e: 2024-12-07T12:24:07,040 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-12-07T12:24:07,040 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:24:07,040 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/b9450271d6ad4680a9f6d5dd7e44e05f because midkey is the same as first or last row 2024-12-07T12:24:07,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 613515ffd88b0d443fd5e64f291ab31e:info, priority=-2147483648, current under 
compaction store size is 1 2024-12-07T12:24:07,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T12:24:07,040 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T12:24:07,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8855): Flush requested on 613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:24:07,040 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 613515ffd88b0d443fd5e64f291ab31e 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-12-07T12:24:07,041 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85094 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T12:24:07,041 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1541): 613515ffd88b0d443fd5e64f291ab31e/info is initiating minor compaction (all files) 2024-12-07T12:24:07,041 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 613515ffd88b0d443fd5e64f291ab31e/info in TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e. 2024-12-07T12:24:07,041 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/b9450271d6ad4680a9f6d5dd7e44e05f, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/d5f3999e662446eba8cef87fa0f6b14c, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/6707b2540feb45fb940663480c2dbb6b] into tmpdir=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp, totalSize=83.1 K 2024-12-07T12:24:07,042 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.Compactor(225): Compacting b9450271d6ad4680a9f6d5dd7e44e05f, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733574232813 2024-12-07T12:24:07,042 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.Compactor(225): Compacting d5f3999e662446eba8cef87fa0f6b14c, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=66, earliestPutTs=1733574244971 2024-12-07T12:24:07,042 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6707b2540feb45fb940663480c2dbb6b, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733574244996 2024-12-07T12:24:07,044 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/37b2c8f9ca294facbe1e53872672b854 is 1080, key is 
row0066/info:/1733574247018/Put/seqid=0 2024-12-07T12:24:07,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741843_1019 (size=21141) 2024-12-07T12:24:07,050 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=100 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/37b2c8f9ca294facbe1e53872672b854 2024-12-07T12:24:07,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741843_1019 (size=21141) 2024-12-07T12:24:07,055 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 613515ffd88b0d443fd5e64f291ab31e#info#compaction#64 average throughput is 33.35 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T12:24:07,056 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/768e89cb157e41b196e720fdebfc2fc4 is 1080, key is row0001/info:/1733574232813/Put/seqid=0 2024-12-07T12:24:07,056 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/37b2c8f9ca294facbe1e53872672b854 as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/37b2c8f9ca294facbe1e53872672b854 2024-12-07T12:24:07,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741844_1020 (size=75378) 2024-12-07T12:24:07,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741844_1020 (size=75378) 2024-12-07T12:24:07,062 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/37b2c8f9ca294facbe1e53872672b854, entries=15, sequenceid=100, filesize=20.6 K 2024-12-07T12:24:07,063 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=12.61 KB/12912 for 613515ffd88b0d443fd5e64f291ab31e in 23ms, sequenceid=100, compaction requested=false 2024-12-07T12:24:07,063 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 613515ffd88b0d443fd5e64f291ab31e: 2024-12-07T12:24:07,063 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=103.7 K, sizeToCheck=16.0 K 2024-12-07T12:24:07,063 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T12:24:07,063 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/b9450271d6ad4680a9f6d5dd7e44e05f because midkey is the same as first or last row 2024-12-07T12:24:07,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8855): Flush requested on 613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:24:07,064 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 613515ffd88b0d443fd5e64f291ab31e 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-07T12:24:07,068 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/9e312fa3adda46e9a36e4b093a10eafa is 1080, key is row0081/info:/1733574247041/Put/seqid=0 2024-12-07T12:24:07,071 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/768e89cb157e41b196e720fdebfc2fc4 as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/768e89cb157e41b196e720fdebfc2fc4 2024-12-07T12:24:07,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741845_1021 (size=18987) 2024-12-07T12:24:07,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741845_1021 (size=18987) 2024-12-07T12:24:07,073 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/9e312fa3adda46e9a36e4b093a10eafa 2024-12-07T12:24:07,076 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 613515ffd88b0d443fd5e64f291ab31e/info of 613515ffd88b0d443fd5e64f291ab31e into 768e89cb157e41b196e720fdebfc2fc4(size=73.6 K), total size for store is 94.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
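Interleaved with the flushes and compactions, the split policy keeps logging the same pair of checks: "Should split because region size is big enough sumSize=..., sizeToCheck=16.0 K" followed by "cannot split ... because midkey is the same as first or last row". The sketch below is only an illustration of that two-step decision; the method names and byte values are invented for the example and are not HBase's split-policy API.

    // Illustrative sketch only: the policy asks (1) are the store files big enough,
    // and (2) does the largest file yield a usable split point, i.e. a midkey that
    // differs from its first and last row. The log shows (1) passing and (2) failing
    // repeatedly, until the real split request arrives with splitKey=row0062.
    public class SplitCheckSketch {
        static boolean shouldSplit(long sumStoreFileSizeBytes, long sizeToCheckBytes) {
            return sumStoreFileSizeBytes > sizeToCheckBytes;
        }

        static boolean hasUsableSplitPoint(String midkey, String firstRow, String lastRow) {
            return !midkey.equals(firstRow) && !midkey.equals(lastRow);
        }

        public static void main(String[] args) {
            // Invented byte values in the spirit of "sumSize=94.3 K, sizeToCheck=16.0 K".
            System.out.println(shouldSplit(96_563L, 16_384L));                        // true  -> "Should split ..."
            // While the midkey still equals a boundary row, no split point is produced.
            System.out.println(hasUsableSplitPoint("row0001", "row0001", "row0080")); // false -> "cannot split ... midkey"
        }
    }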
2024-12-07T12:24:07,077 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 613515ffd88b0d443fd5e64f291ab31e:
2024-12-07T12:24:07,077 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e., storeName=613515ffd88b0d443fd5e64f291ab31e/info, priority=13, startTime=1733574247040; duration=0sec
2024-12-07T12:24:07,077 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=94.3 K, sizeToCheck=16.0 K
2024-12-07T12:24:07,077 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-07T12:24:07,077 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=94.3 K, sizeToCheck=16.0 K
2024-12-07T12:24:07,077 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-07T12:24:07,077 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=94.3 K, sizeToCheck=16.0 K
2024-12-07T12:24:07,077 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-07T12:24:07,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/9e312fa3adda46e9a36e4b093a10eafa as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/9e312fa3adda46e9a36e4b093a10eafa
2024-12-07T12:24:07,078 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-07T12:24:07,078 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-07T12:24:07,078 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 613515ffd88b0d443fd5e64f291ab31e:info
2024-12-07T12:24:07,079 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44847 {}] assignment.AssignmentManager(1363): Split request from 27c6fcd7dac8,37961,1733574221909, parent={ENCODED => 613515ffd88b0d443fd5e64f291ab31e, NAME => 'TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062
2024-12-07T12:24:07,082 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/9e312fa3adda46e9a36e4b093a10eafa, entries=13, sequenceid=116, filesize=18.5 K
2024-12-07T12:24:07,083 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=3.15 KB/3228 for 613515ffd88b0d443fd5e64f291ab31e in 19ms, sequenceid=116, compaction requested=true
2024-12-07T12:24:07,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 613515ffd88b0d443fd5e64f291ab31e:
2024-12-07T12:24:07,083 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.8 K, sizeToCheck=16.0 K
2024-12-07T12:24:07,083 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-07T12:24:07,083 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.8 K, sizeToCheck=16.0 K
2024-12-07T12:24:07,083 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-07T12:24:07,083 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.8 K, sizeToCheck=16.0 K
2024-12-07T12:24:07,083 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-07T12:24:07,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=1
2024-12-07T12:24:07,085 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44847 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=27c6fcd7dac8,37961,1733574221909
2024-12-07T12:24:07,089 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44847 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=613515ffd88b0d443fd5e64f291ab31e, daughterA=5e39b00e357ba7c1ab410abfa44e6cc6, daughterB=410450476a93cc6ca0ae98001a6fdb48
2024-12-07T12:24:07,090 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=613515ffd88b0d443fd5e64f291ab31e, daughterA=5e39b00e357ba7c1ab410abfa44e6cc6, daughterB=410450476a93cc6ca0ae98001a6fdb48
2024-12-07T12:24:07,090 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=613515ffd88b0d443fd5e64f291ab31e, daughterA=5e39b00e357ba7c1ab410abfa44e6cc6, daughterB=410450476a93cc6ca0ae98001a6fdb48
2024-12-07T12:24:07,090 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=613515ffd88b0d443fd5e64f291ab31e, daughterA=5e39b00e357ba7c1ab410abfa44e6cc6, daughterB=410450476a93cc6ca0ae98001a6fdb48
2024-12-07T12:24:07,091 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44847 {}] assignment.AssignmentManager(1363): Split request from 27c6fcd7dac8,37961,1733574221909, parent={ENCODED => 613515ffd88b0d443fd5e64f291ab31e, NAME => 'TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062
2024-12-07T12:24:07,092 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44847 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=SPLITTING, location=27c6fcd7dac8,37961,1733574221909
2024-12-07T12:24:07,093 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44847 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=613515ffd88b0d443fd5e64f291ab31e, daughterA=debd6bc281f92502b61de74d22f619c5, daughterB=b6f392bfc1d46af05d40311834bb6796
2024-12-07T12:24:07,093 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(842): Waiting on xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=613515ffd88b0d443fd5e64f291ab31e, daughterA=debd6bc281f92502b61de74d22f619c5, daughterB=b6f392bfc1d46af05d40311834bb6796 held by pid=7
2024-12-07T12:24:07,097 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=613515ffd88b0d443fd5e64f291ab31e, UNASSIGN}]
2024-12-07T12:24:07,102 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(162): LOCK_EVENT_WAIT SchemaLocking[serverLocks={},namespaceLocks={hbase=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},tableLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},regionLocks={},peerLocks={},metaLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},globalLocks={}]
2024-12-07T12:24:07,102 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=613515ffd88b0d443fd5e64f291ab31e, UNASSIGN
2024-12-07T12:24:07,102 DEBUG [PEWorker-3 {}] procedure2.ProcedureExecutor(1511): LOCK_EVENT_WAIT pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=613515ffd88b0d443fd5e64f291ab31e, daughterA=debd6bc281f92502b61de74d22f619c5, daughterB=b6f392bfc1d46af05d40311834bb6796
2024-12-07T12:24:07,103 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=613515ffd88b0d443fd5e64f291ab31e, regionState=CLOSING, regionLocation=27c6fcd7dac8,37961,1733574221909
2024-12-07T12:24:07,105 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=613515ffd88b0d443fd5e64f291ab31e, UNASSIGN because future has completed
2024-12-07T12:24:07,106 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false
2024-12-07T12:24:07,106 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure 613515ffd88b0d443fd5e64f291ab31e, server=27c6fcd7dac8,37961,1733574221909}]
2024-12-07T12:24:07,226 WARN
[Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:07,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:07,263 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(122): Close 613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:24:07,263 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-07T12:24:07,263 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1722): Closing 613515ffd88b0d443fd5e64f291ab31e, disabling compactions & flushes 2024-12-07T12:24:07,263 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e. 2024-12-07T12:24:07,263 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e. 2024-12-07T12:24:07,263 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e. after waiting 0 ms 2024-12-07T12:24:07,263 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e. 
2024-12-07T12:24:07,264 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(2902): Flushing 613515ffd88b0d443fd5e64f291ab31e 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-12-07T12:24:07,268 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/f1b8c5fcf1c7450fa437fb125ba647e2 is 1080, key is row0094/info:/1733574247065/Put/seqid=0 2024-12-07T12:24:07,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741846_1022 (size=8193) 2024-12-07T12:24:07,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741846_1022 (size=8193) 2024-12-07T12:24:07,273 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/f1b8c5fcf1c7450fa437fb125ba647e2 2024-12-07T12:24:07,278 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/.tmp/info/f1b8c5fcf1c7450fa437fb125ba647e2 as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/f1b8c5fcf1c7450fa437fb125ba647e2 2024-12-07T12:24:07,282 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/f1b8c5fcf1c7450fa437fb125ba647e2, entries=3, sequenceid=123, filesize=8.0 K 2024-12-07T12:24:07,283 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 613515ffd88b0d443fd5e64f291ab31e in 20ms, sequenceid=123, compaction requested=true 2024-12-07T12:24:07,284 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/5ca8af0f64c248aa81991ba608a9ed7f, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/cc122be8c7174af7a20743912f146e5f, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/b9450271d6ad4680a9f6d5dd7e44e05f, 
hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/a59ceddf7d8c47a4b40524eba44f3320, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/d5f3999e662446eba8cef87fa0f6b14c, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/6707b2540feb45fb940663480c2dbb6b] to archive 2024-12-07T12:24:07,285 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-07T12:24:07,286 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/5ca8af0f64c248aa81991ba608a9ed7f to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/5ca8af0f64c248aa81991ba608a9ed7f 2024-12-07T12:24:07,287 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/cc122be8c7174af7a20743912f146e5f to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/cc122be8c7174af7a20743912f146e5f 2024-12-07T12:24:07,288 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/b9450271d6ad4680a9f6d5dd7e44e05f to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/b9450271d6ad4680a9f6d5dd7e44e05f 2024-12-07T12:24:07,289 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/a59ceddf7d8c47a4b40524eba44f3320 to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/a59ceddf7d8c47a4b40524eba44f3320 2024-12-07T12:24:07,290 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/d5f3999e662446eba8cef87fa0f6b14c to 
hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/d5f3999e662446eba8cef87fa0f6b14c 2024-12-07T12:24:07,291 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/6707b2540feb45fb940663480c2dbb6b to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/6707b2540feb45fb940663480c2dbb6b 2024-12-07T12:24:07,297 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=1 2024-12-07T12:24:07,297 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e. 2024-12-07T12:24:07,297 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1676): Region close journal for 613515ffd88b0d443fd5e64f291ab31e: Waiting for close lock at 1733574247263Running coprocessor pre-close hooks at 1733574247263Disabling compacts and flushes for region at 1733574247263Disabling writes for close at 1733574247263Obtaining lock to block concurrent updates at 1733574247264 (+1 ms)Preparing flush snapshotting stores in 613515ffd88b0d443fd5e64f291ab31e at 1733574247264Finished memstore snapshotting TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1733574247264Flushing stores of TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e. 
at 1733574247264Flushing 613515ffd88b0d443fd5e64f291ab31e/info: creating writer at 1733574247265 (+1 ms)Flushing 613515ffd88b0d443fd5e64f291ab31e/info: appending metadata at 1733574247267 (+2 ms)Flushing 613515ffd88b0d443fd5e64f291ab31e/info: closing flushed file at 1733574247267Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@235da033: reopening flushed file at 1733574247277 (+10 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 613515ffd88b0d443fd5e64f291ab31e in 20ms, sequenceid=123, compaction requested=true at 1733574247283 (+6 ms)Writing region close event to WAL at 1733574247294 (+11 ms)Running coprocessor post-close hooks at 1733574247297 (+3 ms)Closed at 1733574247297 2024-12-07T12:24:07,299 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(157): Closed 613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:24:07,300 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=613515ffd88b0d443fd5e64f291ab31e, regionState=CLOSED 2024-12-07T12:24:07,302 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure 613515ffd88b0d443fd5e64f291ab31e, server=27c6fcd7dac8,37961,1733574221909 because future has completed 2024-12-07T12:24:07,305 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-07T12:24:07,305 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; CloseRegionProcedure 613515ffd88b0d443fd5e64f291ab31e, server=27c6fcd7dac8,37961,1733574221909 in 197 msec 2024-12-07T12:24:07,307 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-12-07T12:24:07,307 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=613515ffd88b0d443fd5e64f291ab31e, UNASSIGN in 208 msec 2024-12-07T12:24:07,314 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:24:07,318 INFO [PEWorker-5 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=613515ffd88b0d443fd5e64f291ab31e, threads=4 2024-12-07T12:24:07,320 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/768e89cb157e41b196e720fdebfc2fc4 for region: 613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:24:07,320 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/9e312fa3adda46e9a36e4b093a10eafa for region: 613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:24:07,320 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/f1b8c5fcf1c7450fa437fb125ba647e2 for region: 613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:24:07,320 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/37b2c8f9ca294facbe1e53872672b854 for region: 613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:24:07,329 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/9e312fa3adda46e9a36e4b093a10eafa, top=true 2024-12-07T12:24:07,329 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/37b2c8f9ca294facbe1e53872672b854, top=true 2024-12-07T12:24:07,335 DEBUG [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/f1b8c5fcf1c7450fa437fb125ba647e2, top=true 2024-12-07T12:24:07,341 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/TestLogRolling-testLogRolling=613515ffd88b0d443fd5e64f291ab31e-37b2c8f9ca294facbe1e53872672b854 for child: 410450476a93cc6ca0ae98001a6fdb48, parent: 613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:24:07,341 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/TestLogRolling-testLogRolling=613515ffd88b0d443fd5e64f291ab31e-9e312fa3adda46e9a36e4b093a10eafa for child: 410450476a93cc6ca0ae98001a6fdb48, parent: 613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:24:07,341 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/37b2c8f9ca294facbe1e53872672b854 for region: 613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:24:07,342 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/9e312fa3adda46e9a36e4b093a10eafa for region: 613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:24:07,345 INFO [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(691): Created 
linkFile:hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/TestLogRolling-testLogRolling=613515ffd88b0d443fd5e64f291ab31e-f1b8c5fcf1c7450fa437fb125ba647e2 for child: 410450476a93cc6ca0ae98001a6fdb48, parent: 613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:24:07,345 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/f1b8c5fcf1c7450fa437fb125ba647e2 for region: 613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:24:07,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741847_1023 (size=27) 2024-12-07T12:24:07,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741847_1023 (size=27) 2024-12-07T12:24:07,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741848_1024 (size=27) 2024-12-07T12:24:07,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741848_1024 (size=27) 2024-12-07T12:24:07,356 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/768e89cb157e41b196e720fdebfc2fc4 for region: 613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:24:07,358 DEBUG [PEWorker-5 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 613515ffd88b0d443fd5e64f291ab31e Daughter A: [hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/5e39b00e357ba7c1ab410abfa44e6cc6/info/768e89cb157e41b196e720fdebfc2fc4.613515ffd88b0d443fd5e64f291ab31e] storefiles, Daughter B: [hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/TestLogRolling-testLogRolling=613515ffd88b0d443fd5e64f291ab31e-37b2c8f9ca294facbe1e53872672b854, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/768e89cb157e41b196e720fdebfc2fc4.613515ffd88b0d443fd5e64f291ab31e, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/TestLogRolling-testLogRolling=613515ffd88b0d443fd5e64f291ab31e-9e312fa3adda46e9a36e4b093a10eafa, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/TestLogRolling-testLogRolling=613515ffd88b0d443fd5e64f291ab31e-f1b8c5fcf1c7450fa437fb125ba647e2] storefiles. 
2024-12-07T12:24:07,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741849_1025 (size=71) 2024-12-07T12:24:07,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741849_1025 (size=71) 2024-12-07T12:24:07,366 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:24:07,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741850_1026 (size=71) 2024-12-07T12:24:07,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741850_1026 (size=71) 2024-12-07T12:24:07,378 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:24:07,388 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/5e39b00e357ba7c1ab410abfa44e6cc6/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-12-07T12:24:07,391 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-12-07T12:24:07,393 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733574247393"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1733574247393"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1733574247393"}]},"ts":"1733574247393"} 2024-12-07T12:24:07,393 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733574247085.5e39b00e357ba7c1ab410abfa44e6cc6.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733574247393"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733574247393"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733574247393"}]},"ts":"1733574247393"} 2024-12-07T12:24:07,393 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733574247393"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733574247393"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733574247393"}]},"ts":"1733574247393"} 2024-12-07T12:24:07,411 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=5e39b00e357ba7c1ab410abfa44e6cc6, ASSIGN}, {pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=410450476a93cc6ca0ae98001a6fdb48, ASSIGN}] 2024-12-07T12:24:07,412 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=410450476a93cc6ca0ae98001a6fdb48, ASSIGN 2024-12-07T12:24:07,412 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=5e39b00e357ba7c1ab410abfa44e6cc6, ASSIGN 2024-12-07T12:24:07,413 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=5e39b00e357ba7c1ab410abfa44e6cc6, ASSIGN; state=SPLITTING_NEW, location=27c6fcd7dac8,37961,1733574221909; forceNewPlan=false, retain=false 2024-12-07T12:24:07,413 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=410450476a93cc6ca0ae98001a6fdb48, ASSIGN; state=SPLITTING_NEW, location=27c6fcd7dac8,37961,1733574221909; forceNewPlan=false, retain=false 2024-12-07T12:24:07,564 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=410450476a93cc6ca0ae98001a6fdb48, regionState=OPENING, regionLocation=27c6fcd7dac8,37961,1733574221909 2024-12-07T12:24:07,564 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=5e39b00e357ba7c1ab410abfa44e6cc6, regionState=OPENING, regionLocation=27c6fcd7dac8,37961,1733574221909 2024-12-07T12:24:07,566 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=410450476a93cc6ca0ae98001a6fdb48, ASSIGN because future has completed 2024-12-07T12:24:07,566 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure 410450476a93cc6ca0ae98001a6fdb48, server=27c6fcd7dac8,37961,1733574221909}] 2024-12-07T12:24:07,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=5e39b00e357ba7c1ab410abfa44e6cc6, ASSIGN because future has completed 2024-12-07T12:24:07,568 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5e39b00e357ba7c1ab410abfa44e6cc6, server=27c6fcd7dac8,37961,1733574221909}] 2024-12-07T12:24:07,722 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733574247085.5e39b00e357ba7c1ab410abfa44e6cc6. 
2024-12-07T12:24:07,722 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7752): Opening region: {ENCODED => 5e39b00e357ba7c1ab410abfa44e6cc6, NAME => 'TestLogRolling-testLogRolling,,1733574247085.5e39b00e357ba7c1ab410abfa44e6cc6.', STARTKEY => '', ENDKEY => 'row0062'} 2024-12-07T12:24:07,723 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 5e39b00e357ba7c1ab410abfa44e6cc6 2024-12-07T12:24:07,723 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733574247085.5e39b00e357ba7c1ab410abfa44e6cc6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:24:07,723 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7794): checking encryption for 5e39b00e357ba7c1ab410abfa44e6cc6 2024-12-07T12:24:07,723 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7797): checking classloading for 5e39b00e357ba7c1ab410abfa44e6cc6 2024-12-07T12:24:07,724 INFO [StoreOpener-5e39b00e357ba7c1ab410abfa44e6cc6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 5e39b00e357ba7c1ab410abfa44e6cc6 2024-12-07T12:24:07,725 INFO [StoreOpener-5e39b00e357ba7c1ab410abfa44e6cc6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5e39b00e357ba7c1ab410abfa44e6cc6 columnFamilyName info 2024-12-07T12:24:07,725 DEBUG [StoreOpener-5e39b00e357ba7c1ab410abfa44e6cc6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:24:07,736 DEBUG [StoreOpener-5e39b00e357ba7c1ab410abfa44e6cc6-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/5e39b00e357ba7c1ab410abfa44e6cc6/info/768e89cb157e41b196e720fdebfc2fc4.613515ffd88b0d443fd5e64f291ab31e->hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/768e89cb157e41b196e720fdebfc2fc4-bottom 2024-12-07T12:24:07,737 INFO [StoreOpener-5e39b00e357ba7c1ab410abfa44e6cc6-1 {}] regionserver.HStore(327): Store=5e39b00e357ba7c1ab410abfa44e6cc6/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:24:07,737 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1038): replaying wal for 5e39b00e357ba7c1ab410abfa44e6cc6 2024-12-07T12:24:07,738 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/5e39b00e357ba7c1ab410abfa44e6cc6 2024-12-07T12:24:07,739 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/5e39b00e357ba7c1ab410abfa44e6cc6 2024-12-07T12:24:07,739 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1048): stopping wal replay for 5e39b00e357ba7c1ab410abfa44e6cc6 2024-12-07T12:24:07,739 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1060): Cleaning up temporary data for 5e39b00e357ba7c1ab410abfa44e6cc6 2024-12-07T12:24:07,741 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1093): writing seq id for 5e39b00e357ba7c1ab410abfa44e6cc6 2024-12-07T12:24:07,741 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1114): Opened 5e39b00e357ba7c1ab410abfa44e6cc6; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=755495, jitterRate=-0.03933882713317871}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T12:24:07,742 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5e39b00e357ba7c1ab410abfa44e6cc6 2024-12-07T12:24:07,742 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1006): Region open journal for 5e39b00e357ba7c1ab410abfa44e6cc6: Running coprocessor pre-open hook at 1733574247723Writing region info on filesystem at 1733574247723Initializing all the Stores at 1733574247723Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574247724 (+1 ms)Cleaning up temporary data from old regions at 1733574247739 (+15 ms)Running coprocessor post-open hooks at 1733574247742 (+3 ms)Region opened successfully at 1733574247742 2024-12-07T12:24:07,743 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733574247085.5e39b00e357ba7c1ab410abfa44e6cc6., pid=14, masterSystemTime=1733574247719 2024-12-07T12:24:07,743 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(403): Add compact mark for store 
5e39b00e357ba7c1ab410abfa44e6cc6:info, priority=-2147483648, current under compaction store size is 1 2024-12-07T12:24:07,743 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-07T12:24:07,743 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T12:24:07,744 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1733574247085.5e39b00e357ba7c1ab410abfa44e6cc6. 2024-12-07T12:24:07,744 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1541): 5e39b00e357ba7c1ab410abfa44e6cc6/info is initiating minor compaction (all files) 2024-12-07T12:24:07,744 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 5e39b00e357ba7c1ab410abfa44e6cc6/info in TestLogRolling-testLogRolling,,1733574247085.5e39b00e357ba7c1ab410abfa44e6cc6. 2024-12-07T12:24:07,744 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/5e39b00e357ba7c1ab410abfa44e6cc6/info/768e89cb157e41b196e720fdebfc2fc4.613515ffd88b0d443fd5e64f291ab31e->hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/768e89cb157e41b196e720fdebfc2fc4-bottom] into tmpdir=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/5e39b00e357ba7c1ab410abfa44e6cc6/.tmp, totalSize=73.6 K 2024-12-07T12:24:07,745 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 768e89cb157e41b196e720fdebfc2fc4.613515ffd88b0d443fd5e64f291ab31e, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733574232813 2024-12-07T12:24:07,745 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733574247085.5e39b00e357ba7c1ab410abfa44e6cc6. 2024-12-07T12:24:07,745 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733574247085.5e39b00e357ba7c1ab410abfa44e6cc6. 2024-12-07T12:24:07,746 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48. 
2024-12-07T12:24:07,746 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 410450476a93cc6ca0ae98001a6fdb48, NAME => 'TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.', STARTKEY => 'row0062', ENDKEY => ''} 2024-12-07T12:24:07,746 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=5e39b00e357ba7c1ab410abfa44e6cc6, regionState=OPEN, openSeqNum=127, regionLocation=27c6fcd7dac8,37961,1733574221909 2024-12-07T12:24:07,746 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 410450476a93cc6ca0ae98001a6fdb48 2024-12-07T12:24:07,746 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:24:07,746 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 410450476a93cc6ca0ae98001a6fdb48 2024-12-07T12:24:07,746 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 410450476a93cc6ca0ae98001a6fdb48 2024-12-07T12:24:07,748 INFO [StoreOpener-410450476a93cc6ca0ae98001a6fdb48-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 410450476a93cc6ca0ae98001a6fdb48 2024-12-07T12:24:07,748 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37961 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-12-07T12:24:07,748 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-12-07T12:24:07,748 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-12-07T12:24:07,748 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=14, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5e39b00e357ba7c1ab410abfa44e6cc6, server=27c6fcd7dac8,37961,1733574221909 because future has completed 2024-12-07T12:24:07,748 INFO [StoreOpener-410450476a93cc6ca0ae98001a6fdb48-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 410450476a93cc6ca0ae98001a6fdb48 columnFamilyName info 2024-12-07T12:24:07,748 DEBUG [StoreOpener-410450476a93cc6ca0ae98001a6fdb48-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:24:07,753 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=11 2024-12-07T12:24:07,753 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 5e39b00e357ba7c1ab410abfa44e6cc6, server=27c6fcd7dac8,37961,1733574221909 in 182 msec 2024-12-07T12:24:07,755 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=5e39b00e357ba7c1ab410abfa44e6cc6, ASSIGN in 342 msec 2024-12-07T12:24:07,757 DEBUG [StoreOpener-410450476a93cc6ca0ae98001a6fdb48-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/768e89cb157e41b196e720fdebfc2fc4.613515ffd88b0d443fd5e64f291ab31e->hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/768e89cb157e41b196e720fdebfc2fc4-top 2024-12-07T12:24:07,761 DEBUG [StoreOpener-410450476a93cc6ca0ae98001a6fdb48-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/TestLogRolling-testLogRolling=613515ffd88b0d443fd5e64f291ab31e-37b2c8f9ca294facbe1e53872672b854 2024-12-07T12:24:07,765 DEBUG [StoreOpener-410450476a93cc6ca0ae98001a6fdb48-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/TestLogRolling-testLogRolling=613515ffd88b0d443fd5e64f291ab31e-9e312fa3adda46e9a36e4b093a10eafa 2024-12-07T12:24:07,768 DEBUG [StoreOpener-410450476a93cc6ca0ae98001a6fdb48-1 {}] 
regionserver.StoreEngine(278): loaded hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/TestLogRolling-testLogRolling=613515ffd88b0d443fd5e64f291ab31e-f1b8c5fcf1c7450fa437fb125ba647e2 2024-12-07T12:24:07,769 INFO [StoreOpener-410450476a93cc6ca0ae98001a6fdb48-1 {}] regionserver.HStore(327): Store=410450476a93cc6ca0ae98001a6fdb48/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:24:07,769 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 410450476a93cc6ca0ae98001a6fdb48 2024-12-07T12:24:07,770 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48 2024-12-07T12:24:07,771 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5e39b00e357ba7c1ab410abfa44e6cc6#info#compaction#67 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T12:24:07,771 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48 2024-12-07T12:24:07,771 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/5e39b00e357ba7c1ab410abfa44e6cc6/.tmp/info/aa0d137fe7014df897e2467fab29778f is 1080, key is row0001/info:/1733574232813/Put/seqid=0 2024-12-07T12:24:07,771 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 410450476a93cc6ca0ae98001a6fdb48 2024-12-07T12:24:07,771 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 410450476a93cc6ca0ae98001a6fdb48 2024-12-07T12:24:07,772 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/1588230740/.tmp/info/8caf33e8a63646fba8109159453f2138 is 193, key is TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48./info:regioninfo/1733574247563/Put/seqid=0 2024-12-07T12:24:07,773 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 410450476a93cc6ca0ae98001a6fdb48 2024-12-07T12:24:07,774 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 410450476a93cc6ca0ae98001a6fdb48; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=872536, 
jitterRate=0.10948777198791504}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-07T12:24:07,774 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 410450476a93cc6ca0ae98001a6fdb48
2024-12-07T12:24:07,775 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 410450476a93cc6ca0ae98001a6fdb48: Running coprocessor pre-open hook at 1733574247746Writing region info on filesystem at 1733574247746Initializing all the Stores at 1733574247747 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574247747Cleaning up temporary data from old regions at 1733574247771 (+24 ms)Running coprocessor post-open hooks at 1733574247774 (+3 ms)Region opened successfully at 1733574247775 (+1 ms)
2024-12-07T12:24:07,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741851_1027 (size=70862)
2024-12-07T12:24:07,776 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48., pid=13, masterSystemTime=1733574247719
2024-12-07T12:24:07,776 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 410450476a93cc6ca0ae98001a6fdb48:info, priority=-2147483648, current under compaction store size is 2
2024-12-07T12:24:07,776 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-07T12:24:07,776 DEBUG [RS:0;27c6fcd7dac8:37961-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-12-07T12:24:07,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741851_1027 (size=70862)
2024-12-07T12:24:07,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741852_1028 (size=9847)
2024-12-07T12:24:07,778 INFO [RS:0;27c6fcd7dac8:37961-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.
2024-12-07T12:24:07,778 DEBUG [RS:0;27c6fcd7dac8:37961-longCompactions-0 {}] regionserver.HStore(1541): 410450476a93cc6ca0ae98001a6fdb48/info is initiating minor compaction (all files)
2024-12-07T12:24:07,778 INFO [RS:0;27c6fcd7dac8:37961-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 410450476a93cc6ca0ae98001a6fdb48/info in TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.
2024-12-07T12:24:07,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741852_1028 (size=9847)
2024-12-07T12:24:07,779 INFO [RS:0;27c6fcd7dac8:37961-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/768e89cb157e41b196e720fdebfc2fc4.613515ffd88b0d443fd5e64f291ab31e->hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/768e89cb157e41b196e720fdebfc2fc4-top, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/TestLogRolling-testLogRolling=613515ffd88b0d443fd5e64f291ab31e-37b2c8f9ca294facbe1e53872672b854, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/TestLogRolling-testLogRolling=613515ffd88b0d443fd5e64f291ab31e-9e312fa3adda46e9a36e4b093a10eafa, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/TestLogRolling-testLogRolling=613515ffd88b0d443fd5e64f291ab31e-f1b8c5fcf1c7450fa437fb125ba647e2] into tmpdir=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp, totalSize=120.8 K
2024-12-07T12:24:07,779 DEBUG [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.
2024-12-07T12:24:07,779 INFO [RS_OPEN_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.
2024-12-07T12:24:07,780 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/1588230740/.tmp/info/8caf33e8a63646fba8109159453f2138
2024-12-07T12:24:07,780 DEBUG [RS:0;27c6fcd7dac8:37961-longCompactions-0 {}] compactions.Compactor(225): Compacting 768e89cb157e41b196e720fdebfc2fc4.613515ffd88b0d443fd5e64f291ab31e, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1733574232813
2024-12-07T12:24:07,780 DEBUG [RS:0;27c6fcd7dac8:37961-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=613515ffd88b0d443fd5e64f291ab31e-37b2c8f9ca294facbe1e53872672b854, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1733574247018
2024-12-07T12:24:07,780 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=410450476a93cc6ca0ae98001a6fdb48, regionState=OPEN, openSeqNum=127, regionLocation=27c6fcd7dac8,37961,1733574221909
2024-12-07T12:24:07,781 DEBUG [RS:0;27c6fcd7dac8:37961-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=613515ffd88b0d443fd5e64f291ab31e-9e312fa3adda46e9a36e4b093a10eafa, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733574247041
2024-12-07T12:24:07,781 DEBUG [RS:0;27c6fcd7dac8:37961-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=613515ffd88b0d443fd5e64f291ab31e-f1b8c5fcf1c7450fa437fb125ba647e2, keycount=3, bloomtype=ROW, size=8.0 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1733574247065
2024-12-07T12:24:07,783 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure 410450476a93cc6ca0ae98001a6fdb48, server=27c6fcd7dac8,37961,1733574221909 because future has completed
2024-12-07T12:24:07,784 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/5e39b00e357ba7c1ab410abfa44e6cc6/.tmp/info/aa0d137fe7014df897e2467fab29778f as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/5e39b00e357ba7c1ab410abfa44e6cc6/info/aa0d137fe7014df897e2467fab29778f
2024-12-07T12:24:07,788 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=12
2024-12-07T12:24:07,788 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; OpenRegionProcedure 410450476a93cc6ca0ae98001a6fdb48, server=27c6fcd7dac8,37961,1733574221909 in 218 msec
2024-12-07T12:24:07,790 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=7
2024-12-07T12:24:07,791 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=410450476a93cc6ca0ae98001a6fdb48, ASSIGN in 377 msec
2024-12-07T12:24:07,792 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 5e39b00e357ba7c1ab410abfa44e6cc6/info of 5e39b00e357ba7c1ab410abfa44e6cc6 into aa0d137fe7014df897e2467fab29778f(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-07T12:24:07,792 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 5e39b00e357ba7c1ab410abfa44e6cc6:
2024-12-07T12:24:07,792 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733574247085.5e39b00e357ba7c1ab410abfa44e6cc6., storeName=5e39b00e357ba7c1ab410abfa44e6cc6/info, priority=15, startTime=1733574247743; duration=0sec
2024-12-07T12:24:07,792 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-07T12:24:07,792 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5e39b00e357ba7c1ab410abfa44e6cc6:info
2024-12-07T12:24:07,793 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=613515ffd88b0d443fd5e64f291ab31e, daughterA=5e39b00e357ba7c1ab410abfa44e6cc6, daughterB=410450476a93cc6ca0ae98001a6fdb48 in 706 msec
2024-12-07T12:24:07,793 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=613515ffd88b0d443fd5e64f291ab31e, daughterA=debd6bc281f92502b61de74d22f619c5, daughterB=b6f392bfc1d46af05d40311834bb6796
2024-12-07T12:24:07,793 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=613515ffd88b0d443fd5e64f291ab31e, daughterA=debd6bc281f92502b61de74d22f619c5, daughterB=b6f392bfc1d46af05d40311834bb6796
2024-12-07T12:24:07,793 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=613515ffd88b0d443fd5e64f291ab31e, daughterA=debd6bc281f92502b61de74d22f619c5, daughterB=b6f392bfc1d46af05d40311834bb6796
2024-12-07T12:24:07,794 INFO [PEWorker-1 {}] assignment.SplitTableRegionProcedure(534): Split of {ENCODED => 613515ffd88b0d443fd5e64f291ab31e, NAME => 'TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e.', STARTKEY => '', ENDKEY => ''} skipped; state is already SPLIT
2024-12-07T12:24:07,796 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=613515ffd88b0d443fd5e64f291ab31e, daughterA=debd6bc281f92502b61de74d22f619c5, daughterB=b6f392bfc1d46af05d40311834bb6796 in 702 msec
2024-12-07T12:24:07,803 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/1588230740/.tmp/ns/9c0c78fd686e41cfa1276e13242b7e6c is 43, key is default/ns:d/1733574222696/Put/seqid=0
2024-12-07T12:24:07,807 INFO [RS:0;27c6fcd7dac8:37961-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 410450476a93cc6ca0ae98001a6fdb48#info#compaction#70 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-07T12:24:07,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741853_1029 (size=5153)
2024-12-07T12:24:07,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741853_1029 (size=5153)
2024-12-07T12:24:07,808 DEBUG [RS:0;27c6fcd7dac8:37961-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/82bf19b4c6f5417eb63cbb2d0485158d is 1080, key is row0062/info:/1733574245010/Put/seqid=0
2024-12-07T12:24:07,809 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/1588230740/.tmp/ns/9c0c78fd686e41cfa1276e13242b7e6c
2024-12-07T12:24:07,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741854_1030 (size=43081)
2024-12-07T12:24:07,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741854_1030 (size=43081)
2024-12-07T12:24:07,819 DEBUG [RS:0;27c6fcd7dac8:37961-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/82bf19b4c6f5417eb63cbb2d0485158d as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/82bf19b4c6f5417eb63cbb2d0485158d
2024-12-07T12:24:07,826 INFO [RS:0;27c6fcd7dac8:37961-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 410450476a93cc6ca0ae98001a6fdb48/info of 410450476a93cc6ca0ae98001a6fdb48 into 82bf19b4c6f5417eb63cbb2d0485158d(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-07T12:24:07,826 DEBUG [RS:0;27c6fcd7dac8:37961-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 410450476a93cc6ca0ae98001a6fdb48:
2024-12-07T12:24:07,826 INFO [RS:0;27c6fcd7dac8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48., storeName=410450476a93cc6ca0ae98001a6fdb48/info, priority=12, startTime=1733574247776; duration=0sec
2024-12-07T12:24:07,826 DEBUG [RS:0;27c6fcd7dac8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-07T12:24:07,826 DEBUG [RS:0;27c6fcd7dac8:37961-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 410450476a93cc6ca0ae98001a6fdb48:info
2024-12-07T12:24:07,829 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/1588230740/.tmp/table/1324298e538340be8b9d793d33673d35 is 65, key is TestLogRolling-testLogRolling/table:state/1733574223098/Put/seqid=0
2024-12-07T12:24:07,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741855_1031 (size=5340)
2024-12-07T12:24:07,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741855_1031 (size=5340)
2024-12-07T12:24:07,835 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/1588230740/.tmp/table/1324298e538340be8b9d793d33673d35
2024-12-07T12:24:07,840 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/1588230740/.tmp/info/8caf33e8a63646fba8109159453f2138 as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/1588230740/info/8caf33e8a63646fba8109159453f2138
2024-12-07T12:24:07,844 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/1588230740/info/8caf33e8a63646fba8109159453f2138, entries=30, sequenceid=17, filesize=9.6 K
2024-12-07T12:24:07,845 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/1588230740/.tmp/ns/9c0c78fd686e41cfa1276e13242b7e6c as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/1588230740/ns/9c0c78fd686e41cfa1276e13242b7e6c
2024-12-07T12:24:07,850 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/1588230740/ns/9c0c78fd686e41cfa1276e13242b7e6c, entries=2, sequenceid=17, filesize=5.0 K
2024-12-07T12:24:07,851 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing
hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/1588230740/.tmp/table/1324298e538340be8b9d793d33673d35 as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/1588230740/table/1324298e538340be8b9d793d33673d35 2024-12-07T12:24:07,858 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/1588230740/table/1324298e538340be8b9d793d33673d35, entries=2, sequenceid=17, filesize=5.2 K 2024-12-07T12:24:07,859 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 111ms, sequenceid=17, compaction requested=false 2024-12-07T12:24:07,859 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-07T12:24:08,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:24:08,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:09,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:35016 deadline: 1733574259070, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e. is not online on 27c6fcd7dac8,37961,1733574221909 2024-12-07T12:24:09,071 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e., hostname=27c6fcd7dac8,37961,1733574221909, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e., hostname=27c6fcd7dac8,37961,1733574221909, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e. 
is not online on 27c6fcd7dac8,37961,1733574221909 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-07T12:24:09,071 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e., hostname=27c6fcd7dac8,37961,1733574221909, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e. is not online on 27c6fcd7dac8,37961,1733574221909 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-07T12:24:09,072 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1733574222739.613515ffd88b0d443fd5e64f291ab31e., hostname=27c6fcd7dac8,37961,1733574221909, seqNum=2 from cache 2024-12-07T12:24:09,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:09,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:24:10,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:10,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:11,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:24:11,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:11,841 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T12:24:12,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:12,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:12,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,299 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,299 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,299 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,317 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,318 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,318 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,318 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,318 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,318 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,321 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,321 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,322 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,323 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,829 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-07T12:24:12,830 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,830 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,830 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,830 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,830 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,831 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,831 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,831 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,851 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,852 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,852 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,852 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,852 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,852 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,855 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,856 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,856 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:12,858 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:13,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:24:13,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:14,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:14,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:24:15,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:15,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:16,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:24:16,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:17,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:17,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:24:18,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:18,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:19,161 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48., hostname=27c6fcd7dac8,37961,1733574221909, seqNum=127] 2024-12-07T12:24:19,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8855): Flush requested on 410450476a93cc6ca0ae98001a6fdb48 2024-12-07T12:24:19,172 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 410450476a93cc6ca0ae98001a6fdb48 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-07T12:24:19,176 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/defea9412c9d47749fb5396fe0d8c910 is 1080, key is row0097/info:/1733574259162/Put/seqid=0 2024-12-07T12:24:19,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741856_1032 (size=12516) 2024-12-07T12:24:19,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741856_1032 (size=12516) 2024-12-07T12:24:19,182 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/defea9412c9d47749fb5396fe0d8c910 2024-12-07T12:24:19,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/defea9412c9d47749fb5396fe0d8c910 as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/defea9412c9d47749fb5396fe0d8c910 2024-12-07T12:24:19,192 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/defea9412c9d47749fb5396fe0d8c910, entries=7, sequenceid=137, filesize=12.2 K 2024-12-07T12:24:19,193 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 410450476a93cc6ca0ae98001a6fdb48 in 22ms, sequenceid=137, compaction requested=false 2024-12-07T12:24:19,194 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 410450476a93cc6ca0ae98001a6fdb48: 2024-12-07T12:24:19,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8855): Flush requested on 410450476a93cc6ca0ae98001a6fdb48 2024-12-07T12:24:19,195 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 410450476a93cc6ca0ae98001a6fdb48 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-12-07T12:24:19,218 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/c046258965b6425c9f84206d7761dd55 is 1080, key is row0104/info:/1733574259172/Put/seqid=0 2024-12-07T12:24:19,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741857_1033 (size=21156) 2024-12-07T12:24:19,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741857_1033 (size=21156) 2024-12-07T12:24:19,230 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/c046258965b6425c9f84206d7761dd55 2024-12-07T12:24:19,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:19,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:24:19,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/c046258965b6425c9f84206d7761dd55 as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/c046258965b6425c9f84206d7761dd55 2024-12-07T12:24:19,241 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/c046258965b6425c9f84206d7761dd55, entries=15, sequenceid=155, filesize=20.7 K 2024-12-07T12:24:19,242 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=10.51 KB/10760 for 410450476a93cc6ca0ae98001a6fdb48 in 47ms, sequenceid=155, compaction requested=true 2024-12-07T12:24:19,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 410450476a93cc6ca0ae98001a6fdb48: 2024-12-07T12:24:19,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 410450476a93cc6ca0ae98001a6fdb48:info, priority=-2147483648, current under compaction store size is 1 2024-12-07T12:24:19,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T12:24:19,243 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T12:24:19,244 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 76753 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T12:24:19,244 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1541): 410450476a93cc6ca0ae98001a6fdb48/info is initiating minor compaction (all files) 2024-12-07T12:24:19,244 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 410450476a93cc6ca0ae98001a6fdb48/info in TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48. 
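(Annotation, not part of the captured log: the flush records above show a memstore flush adding a third store file to 410450476a93cc6ca0ae98001a6fdb48/info, which immediately makes the store eligible for the minor compaction that starts in the next records. The Java sketch below is a rough, hypothetical illustration of the kind of ratio-based file selection a policy such as the ExploringCompactionPolicy named in the log performs; the class name, ratio value, and sizes are invented placeholders, not the actual HBase code.)

import java.util.ArrayList;
import java.util.List;

// Hypothetical sketch of ratio-based compaction file selection, loosely modeled on the
// behaviour visible in the log above (three eligible store files, all selected).
// This is NOT the real org.apache.hadoop.hbase ExploringCompactionPolicy implementation.
public class RatioSelectionSketch {

    // Pick the contiguous run of store-file sizes in which every file passes the ratio
    // test; prefer more files, breaking ties toward the larger total size.
    static List<Long> select(List<Long> fileSizes, double ratio, int maxFiles) {
        List<Long> best = new ArrayList<>();
        long bestTotal = 0;
        for (int start = 0; start < fileSizes.size(); start++) {
            for (int end = start + 1; end <= Math.min(fileSizes.size(), start + maxFiles); end++) {
                List<Long> candidate = fileSizes.subList(start, end);
                if (!passesRatio(candidate, ratio)) {
                    continue;
                }
                long total = candidate.stream().mapToLong(Long::longValue).sum();
                if (candidate.size() > best.size()
                        || (candidate.size() == best.size() && total > bestTotal)) {
                    best = new ArrayList<>(candidate);
                    bestTotal = total;
                }
            }
        }
        return best;
    }

    // Ratio test: no single file may be larger than ratio * (sum of the other files).
    static boolean passesRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Placeholder sizes in bytes, roughly the same shape as the three files in the log.
        List<Long> sizes = List.of(43_000L, 12_500L, 21_000L);
        System.out.println("Selected for compaction: " + select(sizes, 2.0, 10));
    }
}

(With these placeholder inputs the sketch selects all three sizes, mirroring the "Exploring compaction algorithm has selected 3 files" record above. The captured log resumes below.)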
2024-12-07T12:24:19,244 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/82bf19b4c6f5417eb63cbb2d0485158d, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/defea9412c9d47749fb5396fe0d8c910, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/c046258965b6425c9f84206d7761dd55] into tmpdir=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp, totalSize=75.0 K 2024-12-07T12:24:19,245 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 82bf19b4c6f5417eb63cbb2d0485158d, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1733574245010 2024-12-07T12:24:19,245 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.Compactor(225): Compacting defea9412c9d47749fb5396fe0d8c910, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733574259162 2024-12-07T12:24:19,246 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.Compactor(225): Compacting c046258965b6425c9f84206d7761dd55, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733574259172 2024-12-07T12:24:19,257 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 410450476a93cc6ca0ae98001a6fdb48#info#compaction#74 average throughput is 58.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T12:24:19,258 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/4081f4b2a07c4e7f842ff03f7c9495f8 is 1080, key is row0062/info:/1733574245010/Put/seqid=0 2024-12-07T12:24:19,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741858_1034 (size=66967) 2024-12-07T12:24:19,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741858_1034 (size=66967) 2024-12-07T12:24:19,271 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/4081f4b2a07c4e7f842ff03f7c9495f8 as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/4081f4b2a07c4e7f842ff03f7c9495f8 2024-12-07T12:24:19,277 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 410450476a93cc6ca0ae98001a6fdb48/info of 410450476a93cc6ca0ae98001a6fdb48 into 4081f4b2a07c4e7f842ff03f7c9495f8(size=65.4 K), total size for store is 65.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T12:24:19,277 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 410450476a93cc6ca0ae98001a6fdb48: 2024-12-07T12:24:19,277 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48., storeName=410450476a93cc6ca0ae98001a6fdb48/info, priority=13, startTime=1733574259243; duration=0sec 2024-12-07T12:24:19,277 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T12:24:19,277 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 410450476a93cc6ca0ae98001a6fdb48:info 2024-12-07T12:24:20,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:20,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:21,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:21,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:21,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8855): Flush requested on 410450476a93cc6ca0ae98001a6fdb48 2024-12-07T12:24:21,238 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 410450476a93cc6ca0ae98001a6fdb48 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-07T12:24:21,243 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/c6122c7f288444b2b2047b889782a4f4 is 1080, key is row0119/info:/1733574259196/Put/seqid=0 2024-12-07T12:24:21,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741859_1035 (size=16828) 2024-12-07T12:24:21,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741859_1035 (size=16828) 2024-12-07T12:24:21,250 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/c6122c7f288444b2b2047b889782a4f4 2024-12-07T12:24:21,256 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/c6122c7f288444b2b2047b889782a4f4 as 
hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/c6122c7f288444b2b2047b889782a4f4 2024-12-07T12:24:21,261 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/c6122c7f288444b2b2047b889782a4f4, entries=11, sequenceid=170, filesize=16.4 K 2024-12-07T12:24:21,262 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=14.71 KB/15064 for 410450476a93cc6ca0ae98001a6fdb48 in 24ms, sequenceid=170, compaction requested=false 2024-12-07T12:24:21,262 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 410450476a93cc6ca0ae98001a6fdb48: 2024-12-07T12:24:21,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8855): Flush requested on 410450476a93cc6ca0ae98001a6fdb48 2024-12-07T12:24:21,263 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 410450476a93cc6ca0ae98001a6fdb48 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-12-07T12:24:21,267 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/25fda82864724a27a22c63a2ff1bd589 is 1080, key is row0130/info:/1733574261239/Put/seqid=0 2024-12-07T12:24:21,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741860_1036 (size=22238) 2024-12-07T12:24:21,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741860_1036 (size=22238) 2024-12-07T12:24:21,273 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=189 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/25fda82864724a27a22c63a2ff1bd589 2024-12-07T12:24:21,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/25fda82864724a27a22c63a2ff1bd589 as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/25fda82864724a27a22c63a2ff1bd589 2024-12-07T12:24:21,283 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/25fda82864724a27a22c63a2ff1bd589, entries=16, sequenceid=189, filesize=21.7 K 2024-12-07T12:24:21,284 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=13.66 KB/13988 for 410450476a93cc6ca0ae98001a6fdb48 in 21ms, sequenceid=189, compaction requested=true 2024-12-07T12:24:21,284 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 410450476a93cc6ca0ae98001a6fdb48: 2024-12-07T12:24:21,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 410450476a93cc6ca0ae98001a6fdb48:info, priority=-2147483648, current under compaction store size is 1 2024-12-07T12:24:21,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T12:24:21,284 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T12:24:21,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8855): Flush requested on 410450476a93cc6ca0ae98001a6fdb48 2024-12-07T12:24:21,285 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 410450476a93cc6ca0ae98001a6fdb48 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-07T12:24:21,285 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 106033 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T12:24:21,285 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1541): 410450476a93cc6ca0ae98001a6fdb48/info is initiating minor compaction (all files) 2024-12-07T12:24:21,285 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 410450476a93cc6ca0ae98001a6fdb48/info in TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48. 2024-12-07T12:24:21,285 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/4081f4b2a07c4e7f842ff03f7c9495f8, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/c6122c7f288444b2b2047b889782a4f4, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/25fda82864724a27a22c63a2ff1bd589] into tmpdir=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp, totalSize=103.5 K 2024-12-07T12:24:21,286 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4081f4b2a07c4e7f842ff03f7c9495f8, keycount=57, bloomtype=ROW, size=65.4 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733574245010 2024-12-07T12:24:21,286 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.Compactor(225): Compacting c6122c7f288444b2b2047b889782a4f4, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733574259196 2024-12-07T12:24:21,287 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 25fda82864724a27a22c63a2ff1bd589, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1733574261239 2024-12-07T12:24:21,289 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/5009e3a2cb094f3b8d135a91f4d249fa is 1080, key is row0146/info:/1733574261264/Put/seqid=0 2024-12-07T12:24:21,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741861_1037 (size=20078) 2024-12-07T12:24:21,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741861_1037 (size=20078) 2024-12-07T12:24:21,298 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/5009e3a2cb094f3b8d135a91f4d249fa 2024-12-07T12:24:21,302 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 410450476a93cc6ca0ae98001a6fdb48#info#compaction#78 average throughput is 43.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T12:24:21,303 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/458000afc39b48cbb4b253c7ef8eb10c is 1080, key is row0062/info:/1733574245010/Put/seqid=0 2024-12-07T12:24:21,303 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/5009e3a2cb094f3b8d135a91f4d249fa as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/5009e3a2cb094f3b8d135a91f4d249fa 2024-12-07T12:24:21,307 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/5009e3a2cb094f3b8d135a91f4d249fa, entries=14, sequenceid=206, filesize=19.6 K 2024-12-07T12:24:21,308 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=1.05 KB/1076 for 410450476a93cc6ca0ae98001a6fdb48 in 23ms, sequenceid=206, compaction requested=false 2024-12-07T12:24:21,308 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 410450476a93cc6ca0ae98001a6fdb48: 2024-12-07T12:24:21,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741862_1038 (size=96252) 2024-12-07T12:24:21,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741862_1038 (size=96252) 2024-12-07T12:24:21,314 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/458000afc39b48cbb4b253c7ef8eb10c as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/458000afc39b48cbb4b253c7ef8eb10c 2024-12-07T12:24:21,319 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 410450476a93cc6ca0ae98001a6fdb48/info of 410450476a93cc6ca0ae98001a6fdb48 into 458000afc39b48cbb4b253c7ef8eb10c(size=94.0 K), total size for store is 113.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T12:24:21,319 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 410450476a93cc6ca0ae98001a6fdb48: 2024-12-07T12:24:21,319 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48., storeName=410450476a93cc6ca0ae98001a6fdb48/info, priority=13, startTime=1733574261284; duration=0sec 2024-12-07T12:24:21,319 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T12:24:21,319 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 410450476a93cc6ca0ae98001a6fdb48:info 2024-12-07T12:24:22,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:22,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:23,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:23,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-12-07T12:24:23,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8855): Flush requested on 410450476a93cc6ca0ae98001a6fdb48
2024-12-07T12:24:23,296 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 410450476a93cc6ca0ae98001a6fdb48 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-07T12:24:23,301 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/8ad2f142d47642bcab20e92b6161209d is 1080, key is row0160/info:/1733574261286/Put/seqid=0
2024-12-07T12:24:23,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741863_1039 (size=12516)
2024-12-07T12:24:23,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741863_1039 (size=12516)
2024-12-07T12:24:23,319 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/8ad2f142d47642bcab20e92b6161209d
2024-12-07T12:24:23,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/8ad2f142d47642bcab20e92b6161209d as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/8ad2f142d47642bcab20e92b6161209d
2024-12-07T12:24:23,328 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=410450476a93cc6ca0ae98001a6fdb48, server=27c6fcd7dac8,37961,1733574221909
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-12-07T12:24:23,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:35016 deadline: 1733574273328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=410450476a93cc6ca0ae98001a6fdb48, server=27c6fcd7dac8,37961,1733574221909
2024-12-07T12:24:23,329 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48., hostname=27c6fcd7dac8,37961,1733574221909, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48., hostname=27c6fcd7dac8,37961,1733574221909, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=410450476a93cc6ca0ae98001a6fdb48, server=27c6fcd7dac8,37961,1733574221909
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-07T12:24:23,329 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48., hostname=27c6fcd7dac8,37961,1733574221909, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=410450476a93cc6ca0ae98001a6fdb48, server=27c6fcd7dac8,37961,1733574221909
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-07T12:24:23,329 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48., hostname=27c6fcd7dac8,37961,1733574221909, seqNum=127 because the exception is null or not the one we care about
2024-12-07T12:24:23,330 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/8ad2f142d47642bcab20e92b6161209d, entries=7, sequenceid=217, filesize=12.2 K
2024-12-07T12:24:23,331 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 410450476a93cc6ca0ae98001a6fdb48 in 35ms, sequenceid=217, compaction requested=true
2024-12-07T12:24:23,331 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 410450476a93cc6ca0ae98001a6fdb48:
2024-12-07T12:24:23,332 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 410450476a93cc6ca0ae98001a6fdb48:info, priority=-2147483648, current under compaction store size is 1
2024-12-07T12:24:23,332 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-07T12:24:23,332 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-07T12:24:23,333 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 128846 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-07T12:24:23,333 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1541): 410450476a93cc6ca0ae98001a6fdb48/info is initiating minor compaction (all files)
2024-12-07T12:24:23,333 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 410450476a93cc6ca0ae98001a6fdb48/info in TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.
2024-12-07T12:24:23,333 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/458000afc39b48cbb4b253c7ef8eb10c, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/5009e3a2cb094f3b8d135a91f4d249fa, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/8ad2f142d47642bcab20e92b6161209d] into tmpdir=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp, totalSize=125.8 K
2024-12-07T12:24:23,333 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 458000afc39b48cbb4b253c7ef8eb10c, keycount=84, bloomtype=ROW, size=94.0 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1733574245010
2024-12-07T12:24:23,333 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5009e3a2cb094f3b8d135a91f4d249fa, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1733574261264
2024-12-07T12:24:23,334 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8ad2f142d47642bcab20e92b6161209d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1733574261286
2024-12-07T12:24:23,344 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 410450476a93cc6ca0ae98001a6fdb48#info#compaction#80 average throughput is 53.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-07T12:24:23,345 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/d50e4dc34e2f4a57a67ba71bfb5f1656 is 1080, key is row0062/info:/1733574245010/Put/seqid=0
2024-12-07T12:24:23,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741864_1040 (size=118996)
2024-12-07T12:24:23,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741864_1040 (size=118996)
2024-12-07T12:24:23,353 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/d50e4dc34e2f4a57a67ba71bfb5f1656 as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/d50e4dc34e2f4a57a67ba71bfb5f1656
2024-12-07T12:24:23,358 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 410450476a93cc6ca0ae98001a6fdb48/info of 410450476a93cc6ca0ae98001a6fdb48 into d50e4dc34e2f4a57a67ba71bfb5f1656(size=116.2 K), total size for store is 116.2 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-07T12:24:23,359 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 410450476a93cc6ca0ae98001a6fdb48:
2024-12-07T12:24:23,359 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48., storeName=410450476a93cc6ca0ae98001a6fdb48/info, priority=13, startTime=1733574263331; duration=0sec
2024-12-07T12:24:23,359 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-07T12:24:23,359 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 410450476a93cc6ca0ae98001a6fdb48:info
2024-12-07T12:24:23,512 INFO [master/27c6fcd7dac8:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-07T12:24:23,512 INFO [master/27c6fcd7dac8:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-07T12:24:24,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:24,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:25,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:25,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:26,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:26,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:27,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:27,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:27,668 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-12-07T12:24:28,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:28,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:29,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:29,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:30,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:30,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:31,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:31,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:32,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:32,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:33,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:33,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:33,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8855): Flush requested on 410450476a93cc6ca0ae98001a6fdb48 2024-12-07T12:24:33,337 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 410450476a93cc6ca0ae98001a6fdb48 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-07T12:24:33,341 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/1bdb0b78d2254cfe84cd18908313039c is 1080, key is row0167/info:/1733574263297/Put/seqid=0 2024-12-07T12:24:33,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741865_1041 (size=29784) 2024-12-07T12:24:33,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741865_1041 (size=29784) 2024-12-07T12:24:33,355 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/1bdb0b78d2254cfe84cd18908313039c 2024-12-07T12:24:33,360 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/1bdb0b78d2254cfe84cd18908313039c as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/1bdb0b78d2254cfe84cd18908313039c 2024-12-07T12:24:33,364 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/1bdb0b78d2254cfe84cd18908313039c, entries=23, sequenceid=244, filesize=29.1 K 2024-12-07T12:24:33,365 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=3.15 KB/3228 for 410450476a93cc6ca0ae98001a6fdb48 in 28ms, sequenceid=244, compaction requested=false 2024-12-07T12:24:33,365 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 410450476a93cc6ca0ae98001a6fdb48: 2024-12-07T12:24:34,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:34,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:35,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:35,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:24:35,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8855): Flush requested on 410450476a93cc6ca0ae98001a6fdb48 2024-12-07T12:24:35,350 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 410450476a93cc6ca0ae98001a6fdb48 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-07T12:24:35,355 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/81af9fd18eae423394fd24b68fc138b9 is 1080, key is row0190/info:/1733574273338/Put/seqid=0 2024-12-07T12:24:35,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741866_1042 (size=12516) 2024-12-07T12:24:35,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741866_1042 (size=12516) 2024-12-07T12:24:35,361 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/81af9fd18eae423394fd24b68fc138b9 2024-12-07T12:24:35,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/81af9fd18eae423394fd24b68fc138b9 as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/81af9fd18eae423394fd24b68fc138b9 2024-12-07T12:24:35,371 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/81af9fd18eae423394fd24b68fc138b9, entries=7, sequenceid=254, filesize=12.2 K 2024-12-07T12:24:35,372 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=15.76 KB/16140 for 410450476a93cc6ca0ae98001a6fdb48 in 22ms, sequenceid=254, compaction requested=true 2024-12-07T12:24:35,372 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 410450476a93cc6ca0ae98001a6fdb48: 2024-12-07T12:24:35,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 410450476a93cc6ca0ae98001a6fdb48:info, priority=-2147483648, current under compaction store size is 1 2024-12-07T12:24:35,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T12:24:35,373 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T12:24:35,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8855): Flush requested on 410450476a93cc6ca0ae98001a6fdb48 2024-12-07T12:24:35,373 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing 410450476a93cc6ca0ae98001a6fdb48 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-12-07T12:24:35,374 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 161296 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T12:24:35,374 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1541): 410450476a93cc6ca0ae98001a6fdb48/info is initiating minor compaction (all files) 2024-12-07T12:24:35,374 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 410450476a93cc6ca0ae98001a6fdb48/info in TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48. 2024-12-07T12:24:35,374 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/d50e4dc34e2f4a57a67ba71bfb5f1656, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/1bdb0b78d2254cfe84cd18908313039c, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/81af9fd18eae423394fd24b68fc138b9] into tmpdir=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp, totalSize=157.5 K 2024-12-07T12:24:35,375 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.Compactor(225): Compacting d50e4dc34e2f4a57a67ba71bfb5f1656, keycount=105, bloomtype=ROW, size=116.2 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1733574245010 2024-12-07T12:24:35,375 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1bdb0b78d2254cfe84cd18908313039c, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1733574263297 2024-12-07T12:24:35,375 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 81af9fd18eae423394fd24b68fc138b9, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1733574273338 2024-12-07T12:24:35,377 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/496fccd043a64ac68dc47acc771d1a34 is 1080, key is row0197/info:/1733574275351/Put/seqid=0 2024-12-07T12:24:35,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741867_1043 (size=22254) 2024-12-07T12:24:35,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741867_1043 (size=22254) 2024-12-07T12:24:35,386 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=273 (bloomFilter=true), 
to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/496fccd043a64ac68dc47acc771d1a34 2024-12-07T12:24:35,389 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 410450476a93cc6ca0ae98001a6fdb48#info#compaction#84 average throughput is 69.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T12:24:35,390 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/4ea8e81340e94c4793ddba1dd9527642 is 1080, key is row0062/info:/1733574245010/Put/seqid=0 2024-12-07T12:24:35,394 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/496fccd043a64ac68dc47acc771d1a34 as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/496fccd043a64ac68dc47acc771d1a34 2024-12-07T12:24:35,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741868_1044 (size=151643) 2024-12-07T12:24:35,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741868_1044 (size=151643) 2024-12-07T12:24:35,399 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/496fccd043a64ac68dc47acc771d1a34, entries=16, sequenceid=273, filesize=21.7 K 2024-12-07T12:24:35,400 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=12.61 KB/12912 for 410450476a93cc6ca0ae98001a6fdb48 in 27ms, sequenceid=273, compaction requested=false 2024-12-07T12:24:35,400 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 410450476a93cc6ca0ae98001a6fdb48: 2024-12-07T12:24:35,401 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/4ea8e81340e94c4793ddba1dd9527642 as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/4ea8e81340e94c4793ddba1dd9527642 2024-12-07T12:24:35,406 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 410450476a93cc6ca0ae98001a6fdb48/info of 410450476a93cc6ca0ae98001a6fdb48 into 4ea8e81340e94c4793ddba1dd9527642(size=148.1 K), total size for store is 169.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T12:24:35,406 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 410450476a93cc6ca0ae98001a6fdb48: 2024-12-07T12:24:35,406 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48., storeName=410450476a93cc6ca0ae98001a6fdb48/info, priority=13, startTime=1733574275372; duration=0sec 2024-12-07T12:24:35,406 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T12:24:35,406 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 410450476a93cc6ca0ae98001a6fdb48:info 2024-12-07T12:24:36,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:24:36,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:37,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:37,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:24:37,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8855): Flush requested on 410450476a93cc6ca0ae98001a6fdb48 2024-12-07T12:24:37,396 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 410450476a93cc6ca0ae98001a6fdb48 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-07T12:24:37,400 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/9aa2aeb569bf405fa1bcae49f8ae8edf is 1080, key is row0213/info:/1733574275374/Put/seqid=0 2024-12-07T12:24:37,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741869_1045 (size=19013) 2024-12-07T12:24:37,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741869_1045 (size=19013) 2024-12-07T12:24:37,421 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=410450476a93cc6ca0ae98001a6fdb48, server=27c6fcd7dac8,37961,1733574221909 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-07T12:24:37,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:35016 deadline: 1733574287421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=410450476a93cc6ca0ae98001a6fdb48, server=27c6fcd7dac8,37961,1733574221909 2024-12-07T12:24:37,422 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48., hostname=27c6fcd7dac8,37961,1733574221909, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48., hostname=27c6fcd7dac8,37961,1733574221909, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=410450476a93cc6ca0ae98001a6fdb48, server=27c6fcd7dac8,37961,1733574221909 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-07T12:24:37,422 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48., hostname=27c6fcd7dac8,37961,1733574221909, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=410450476a93cc6ca0ae98001a6fdb48, server=27c6fcd7dac8,37961,1733574221909 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-07T12:24:37,422 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48., hostname=27c6fcd7dac8,37961,1733574221909, seqNum=127 because the exception is null or not the one we care about 2024-12-07T12:24:37,807 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/9aa2aeb569bf405fa1bcae49f8ae8edf 2024-12-07T12:24:37,812 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/9aa2aeb569bf405fa1bcae49f8ae8edf as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/9aa2aeb569bf405fa1bcae49f8ae8edf 2024-12-07T12:24:37,816 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/9aa2aeb569bf405fa1bcae49f8ae8edf, entries=13, sequenceid=290, filesize=18.6 K 2024-12-07T12:24:37,817 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=16.81 KB/17216 for 410450476a93cc6ca0ae98001a6fdb48 in 421ms, sequenceid=290, compaction requested=true 2024-12-07T12:24:37,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 410450476a93cc6ca0ae98001a6fdb48: 2024-12-07T12:24:37,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 410450476a93cc6ca0ae98001a6fdb48:info, priority=-2147483648, current under compaction store size is 1 2024-12-07T12:24:37,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T12:24:37,817 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T12:24:37,818 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 192910 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T12:24:37,818 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1541): 410450476a93cc6ca0ae98001a6fdb48/info is initiating minor compaction (all files) 2024-12-07T12:24:37,818 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
410450476a93cc6ca0ae98001a6fdb48/info in TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48. 2024-12-07T12:24:37,819 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/4ea8e81340e94c4793ddba1dd9527642, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/496fccd043a64ac68dc47acc771d1a34, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/9aa2aeb569bf405fa1bcae49f8ae8edf] into tmpdir=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp, totalSize=188.4 K 2024-12-07T12:24:37,819 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4ea8e81340e94c4793ddba1dd9527642, keycount=135, bloomtype=ROW, size=148.1 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1733574245010 2024-12-07T12:24:37,819 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 496fccd043a64ac68dc47acc771d1a34, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1733574275351 2024-12-07T12:24:37,819 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9aa2aeb569bf405fa1bcae49f8ae8edf, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733574275374 2024-12-07T12:24:37,829 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 410450476a93cc6ca0ae98001a6fdb48#info#compaction#86 average throughput is 84.14 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T12:24:37,830 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/d9c295f2c0d54811b9ec61d9003afa2a is 1080, key is row0062/info:/1733574245010/Put/seqid=0 2024-12-07T12:24:37,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741870_1046 (size=183044) 2024-12-07T12:24:37,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741870_1046 (size=183044) 2024-12-07T12:24:37,840 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/d9c295f2c0d54811b9ec61d9003afa2a as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/d9c295f2c0d54811b9ec61d9003afa2a 2024-12-07T12:24:37,845 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 410450476a93cc6ca0ae98001a6fdb48/info of 410450476a93cc6ca0ae98001a6fdb48 into d9c295f2c0d54811b9ec61d9003afa2a(size=178.8 K), total size for store is 178.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T12:24:37,845 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 410450476a93cc6ca0ae98001a6fdb48: 2024-12-07T12:24:37,845 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48., storeName=410450476a93cc6ca0ae98001a6fdb48/info, priority=13, startTime=1733574277817; duration=0sec 2024-12-07T12:24:37,845 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T12:24:37,845 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 410450476a93cc6ca0ae98001a6fdb48:info 2024-12-07T12:24:38,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:38,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-12-07T12:24:39,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null
2024-12-07T12:24:39,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null
2024-12-07T12:24:40,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null
2024-12-07T12:24:40,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null
2024-12-07T12:24:41,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null
2024-12-07T12:24:41,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null
2024-12-07T12:24:41,841 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-07T12:24:42,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null
2024-12-07T12:24:42,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null
2024-12-07T12:24:43,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null
2024-12-07T12:24:43,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null
2024-12-07T12:24:44,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null
2024-12-07T12:24:44,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null
2024-12-07T12:24:45,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null
2024-12-07T12:24:45,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null
2024-12-07T12:24:46,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null
2024-12-07T12:24:46,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null
2024-12-07T12:24:47,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null
2024-12-07T12:24:47,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:47,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8855): Flush requested on 410450476a93cc6ca0ae98001a6fdb48 2024-12-07T12:24:47,487 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 410450476a93cc6ca0ae98001a6fdb48 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-12-07T12:24:47,491 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/e95eb312c2a24834abb6d727acac5763 is 1080, key is row0226/info:/1733574277397/Put/seqid=0 2024-12-07T12:24:47,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741871_1047 (size=23333) 2024-12-07T12:24:47,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741871_1047 (size=23333) 2024-12-07T12:24:47,506 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=410450476a93cc6ca0ae98001a6fdb48, server=27c6fcd7dac8,37961,1733574221909 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-07T12:24:47,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:35016 deadline: 1733574297505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=410450476a93cc6ca0ae98001a6fdb48, server=27c6fcd7dac8,37961,1733574221909 2024-12-07T12:24:47,506 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48., hostname=27c6fcd7dac8,37961,1733574221909, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48., hostname=27c6fcd7dac8,37961,1733574221909, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=410450476a93cc6ca0ae98001a6fdb48, server=27c6fcd7dac8,37961,1733574221909 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-07T12:24:47,506 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48., hostname=27c6fcd7dac8,37961,1733574221909, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=410450476a93cc6ca0ae98001a6fdb48, server=27c6fcd7dac8,37961,1733574221909 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-07T12:24:47,506 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48., hostname=27c6fcd7dac8,37961,1733574221909, seqNum=127 because the exception is null or not the one we care about 2024-12-07T12:24:47,901 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/e95eb312c2a24834abb6d727acac5763 2024-12-07T12:24:47,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/e95eb312c2a24834abb6d727acac5763 as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/e95eb312c2a24834abb6d727acac5763 2024-12-07T12:24:47,911 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/e95eb312c2a24834abb6d727acac5763, entries=17, sequenceid=311, filesize=22.8 K 2024-12-07T12:24:47,912 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=12.61 KB/12912 for 410450476a93cc6ca0ae98001a6fdb48 in 425ms, sequenceid=311, compaction requested=false 2024-12-07T12:24:47,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 410450476a93cc6ca0ae98001a6fdb48: 2024-12-07T12:24:48,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-12-07T12:24:48,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null
2024-12-07T12:24:48,715 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=3, created chunk count=9, reused chunk count=71, reuseRatio=88.75%
2024-12-07T12:24:48,715 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0
2024-12-07T12:24:49,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null
2024-12-07T12:24:49,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null
2024-12-07T12:24:50,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null
2024-12-07T12:24:50,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null
2024-12-07T12:24:51,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:51,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:52,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:52,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:52,723 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5e39b00e357ba7c1ab410abfa44e6cc6, had cached 0 bytes from a total of 70862 2024-12-07T12:24:52,746 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 410450476a93cc6ca0ae98001a6fdb48, had cached 0 bytes from a total of 206377 2024-12-07T12:24:53,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:53,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:24:54,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:54,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:55,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T12:24:55,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:55,281 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,281 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,281 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,281 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,282 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,282 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,282 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,282 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,301 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,302 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,302 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,302 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,302 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,302 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,305 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,305 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,305 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,307 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,812 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-07T12:24:55,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,835 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,836 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,836 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,836 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,836 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,836 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,840 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,840 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:55,843 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T12:24:56,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:56,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:57,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:57,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:57,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37961 {}] regionserver.HRegion(8855): Flush requested on 410450476a93cc6ca0ae98001a6fdb48 2024-12-07T12:24:57,577 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 410450476a93cc6ca0ae98001a6fdb48 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-07T12:24:57,581 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/37e64c1b5f994c3485bd20289d129004 is 1080, key is row0243/info:/1733574287488/Put/seqid=0 2024-12-07T12:24:57,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741872_1048 (size=19013) 2024-12-07T12:24:57,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741872_1048 (size=19013) 2024-12-07T12:24:57,587 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/37e64c1b5f994c3485bd20289d129004 2024-12-07T12:24:57,592 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/37e64c1b5f994c3485bd20289d129004 as 
hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/37e64c1b5f994c3485bd20289d129004 2024-12-07T12:24:57,596 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/37e64c1b5f994c3485bd20289d129004, entries=13, sequenceid=327, filesize=18.6 K 2024-12-07T12:24:57,597 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=1.05 KB/1076 for 410450476a93cc6ca0ae98001a6fdb48 in 20ms, sequenceid=327, compaction requested=true 2024-12-07T12:24:57,597 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 410450476a93cc6ca0ae98001a6fdb48: 2024-12-07T12:24:57,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 410450476a93cc6ca0ae98001a6fdb48:info, priority=-2147483648, current under compaction store size is 1 2024-12-07T12:24:57,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T12:24:57,598 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T12:24:57,599 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 225390 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T12:24:57,599 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1541): 410450476a93cc6ca0ae98001a6fdb48/info is initiating minor compaction (all files) 2024-12-07T12:24:57,599 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 410450476a93cc6ca0ae98001a6fdb48/info in TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48. 
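[Editor's illustrative aside, not part of the captured log.] The flush and compaction-selection entries above report a 13.66 KB (13988-byte) memstore flush producing an 18.6 K (19013-byte) HFile, after which three info store files totalling 225390 bytes (220.1 K) are picked for minor compaction. The small standalone Java sketch below is only a consistency check of those figures; it is not HBase's ExploringCompactionPolicy or any real HBase API. The assumption that the 206377-byte region total reported by the HBase-Metrics2 line corresponds exactly to the two pre-existing store files (d9c295... and e95eb...) is mine and is not stated in the log.

    public class CompactionTotalSketch {
        public static void main(String[] args) {
            // 206377 bytes: total store size for region 410450476a93cc6ca0ae98001a6fdb48
            // from the HBase-Metrics2 line at 12:24:52 (assumed to be the two
            // pre-existing info store files).
            long existingStoreBytes = 206_377L;

            // 19013 bytes: the HFile written by the 12:24:57 flush
            // (37e64c1b5f994c3485bd20289d129004; addStoredBlock size=19013, filesize=18.6 K).
            long newFlushFileBytes = 19_013L;

            // Matches "Exploring compaction algorithm has selected 3 files of size 225390"
            // and "totalSize=220.1 K" (225390 / 1024 ~= 220.1).
            long totalBytes = existingStoreBytes + newFlushFileBytes;
            System.out.printf("3 files of size %d (totalSize=%.1f K)%n",
                    totalBytes, totalBytes / 1024.0);

            // Flush accounting from the same entries: dataSize ~13.66 KB/13988 and
            // heapSize ~14.86 KB/15216 are simply the byte counts divided by 1024.
            System.out.printf("flush dataSize=%.2f KB heapSize=%.2f KB%n",
                    13_988 / 1024.0, 15_216 / 1024.0);
        }
    }

Compiling and running this prints "3 files of size 225390 (totalSize=220.1 K)", in line with the compaction log entries above; the log itself resumes below.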
2024-12-07T12:24:57,599 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/d9c295f2c0d54811b9ec61d9003afa2a, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/e95eb312c2a24834abb6d727acac5763, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/37e64c1b5f994c3485bd20289d129004] into tmpdir=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp, totalSize=220.1 K 2024-12-07T12:24:57,599 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.Compactor(225): Compacting d9c295f2c0d54811b9ec61d9003afa2a, keycount=164, bloomtype=ROW, size=178.8 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733574245010 2024-12-07T12:24:57,600 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.Compactor(225): Compacting e95eb312c2a24834abb6d727acac5763, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1733574277397 2024-12-07T12:24:57,600 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] compactions.Compactor(225): Compacting 37e64c1b5f994c3485bd20289d129004, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1733574287488 2024-12-07T12:24:57,610 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 410450476a93cc6ca0ae98001a6fdb48#info#compaction#89 average throughput is 66.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T12:24:57,610 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/7f0c3af7fa0d47dd9ee45b00eaf014fc is 1080, key is row0062/info:/1733574245010/Put/seqid=0 2024-12-07T12:24:57,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741873_1049 (size=215593) 2024-12-07T12:24:57,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741873_1049 (size=215593) 2024-12-07T12:24:57,619 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/7f0c3af7fa0d47dd9ee45b00eaf014fc as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/7f0c3af7fa0d47dd9ee45b00eaf014fc 2024-12-07T12:24:57,624 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 410450476a93cc6ca0ae98001a6fdb48/info of 410450476a93cc6ca0ae98001a6fdb48 into 7f0c3af7fa0d47dd9ee45b00eaf014fc(size=210.5 K), total size for store is 210.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T12:24:57,624 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 410450476a93cc6ca0ae98001a6fdb48: 2024-12-07T12:24:57,624 INFO [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48., storeName=410450476a93cc6ca0ae98001a6fdb48/info, priority=13, startTime=1733574297597; duration=0sec 2024-12-07T12:24:57,624 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T12:24:57,624 DEBUG [RS:0;27c6fcd7dac8:37961-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 410450476a93cc6ca0ae98001a6fdb48:info 2024-12-07T12:24:58,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:58,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:59,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:59,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:24:59,579 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-12-07T12:24:59,580 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C37961%2C1733574221909.1733574299580 2024-12-07T12:24:59,596 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:24:59,596 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:24:59,596 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:24:59,596 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:24:59,596 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:24:59,596 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/WALs/27c6fcd7dac8,37961,1733574221909/27c6fcd7dac8%2C37961%2C1733574221909.1733574222288 with entries=310, filesize=307.89 KB; new WAL /user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/WALs/27c6fcd7dac8,37961,1733574221909/27c6fcd7dac8%2C37961%2C1733574221909.1733574299580 2024-12-07T12:24:59,597 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37775:37775),(127.0.0.1/127.0.0.1:40849:40849)] 2024-12-07T12:24:59,597 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/WALs/27c6fcd7dac8,37961,1733574221909/27c6fcd7dac8%2C37961%2C1733574221909.1733574222288 is not closed yet, will try archiving it next time 2024-12-07T12:24:59,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741833_1009 (size=315283) 2024-12-07T12:24:59,598 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741833_1009 (size=315283) 2024-12-07T12:24:59,600 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-12-07T12:24:59,604 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/1588230740/.tmp/info/76930a7e50224161a755f9f5a740e91b is 193, key is TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48./info:regioninfo/1733574247780/Put/seqid=0 2024-12-07T12:24:59,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741875_1051 (size=6223) 2024-12-07T12:24:59,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741875_1051 (size=6223) 2024-12-07T12:24:59,609 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/1588230740/.tmp/info/76930a7e50224161a755f9f5a740e91b 2024-12-07T12:24:59,613 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/1588230740/.tmp/info/76930a7e50224161a755f9f5a740e91b as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/1588230740/info/76930a7e50224161a755f9f5a740e91b 2024-12-07T12:24:59,618 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/1588230740/info/76930a7e50224161a755f9f5a740e91b, entries=5, sequenceid=21, filesize=6.1 K 2024-12-07T12:24:59,619 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 19ms, sequenceid=21, compaction requested=false 2024-12-07T12:24:59,619 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-07T12:24:59,619 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 5e39b00e357ba7c1ab410abfa44e6cc6: 2024-12-07T12:24:59,619 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 410450476a93cc6ca0ae98001a6fdb48 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-07T12:24:59,623 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/5305b87731ce46fea14ea95bcf37081e is 1080, key is row0256/info:/1733574297578/Put/seqid=0 2024-12-07T12:24:59,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741876_1052 (size=6035) 2024-12-07T12:24:59,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741876_1052 (size=6035) 2024-12-07T12:24:59,630 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at 
sequenceid=332 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/5305b87731ce46fea14ea95bcf37081e 2024-12-07T12:24:59,635 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/.tmp/info/5305b87731ce46fea14ea95bcf37081e as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/5305b87731ce46fea14ea95bcf37081e 2024-12-07T12:24:59,639 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/5305b87731ce46fea14ea95bcf37081e, entries=1, sequenceid=332, filesize=5.9 K 2024-12-07T12:24:59,640 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 410450476a93cc6ca0ae98001a6fdb48 in 21ms, sequenceid=332, compaction requested=false 2024-12-07T12:24:59,640 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 410450476a93cc6ca0ae98001a6fdb48: 2024-12-07T12:24:59,640 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C37961%2C1733574221909.1733574299640 2024-12-07T12:24:59,644 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:24:59,644 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:24:59,645 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:24:59,645 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:24:59,645 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:24:59,645 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/WALs/27c6fcd7dac8,37961,1733574221909/27c6fcd7dac8%2C37961%2C1733574221909.1733574299580 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/WALs/27c6fcd7dac8,37961,1733574221909/27c6fcd7dac8%2C37961%2C1733574221909.1733574299640 2024-12-07T12:24:59,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741874_1050 (size=731) 2024-12-07T12:24:59,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741874_1050 (size=731) 2024-12-07T12:24:59,648 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40849:40849),(127.0.0.1/127.0.0.1:37775:37775)] 2024-12-07T12:24:59,649 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/WALs/27c6fcd7dac8,37961,1733574221909/27c6fcd7dac8%2C37961%2C1733574221909.1733574222288 to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/oldWALs/27c6fcd7dac8%2C37961%2C1733574221909.1733574222288 2024-12-07T12:24:59,649 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T12:24:59,649 
INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-07T12:24:59,649 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-07T12:24:59,649 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T12:24:59,650 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:24:59,650 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:24:59,650 INFO 
[Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-07T12:24:59,650 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-07T12:24:59,650 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1351082966, stopped=false 2024-12-07T12:24:59,650 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=27c6fcd7dac8,44847,1733574221858 2024-12-07T12:24:59,650 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/WALs/27c6fcd7dac8,37961,1733574221909/27c6fcd7dac8%2C37961%2C1733574221909.1733574299580 to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/oldWALs/27c6fcd7dac8%2C37961%2C1733574221909.1733574299580 2024-12-07T12:24:59,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1018ce14aa20001, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T12:24:59,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T12:24:59,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1018ce14aa20001, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:24:59,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:24:59,653 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T12:24:59,653 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-07T12:24:59,653 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T12:24:59,654 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:24:59,654 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '27c6fcd7dac8,37961,1733574221909' ***** 2024-12-07T12:24:59,654 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T12:24:59,654 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37961-0x1018ce14aa20001, quorum=127.0.0.1:55472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:24:59,654 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:24:59,654 INFO [RS:0;27c6fcd7dac8:37961 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T12:24:59,654 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T12:24:59,655 INFO [RS:0;27c6fcd7dac8:37961 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T12:24:59,655 INFO [RS:0;27c6fcd7dac8:37961 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T12:24:59,655 INFO [RS:0;27c6fcd7dac8:37961 {}] regionserver.HRegionServer(3091): Received CLOSE for 5e39b00e357ba7c1ab410abfa44e6cc6 2024-12-07T12:24:59,655 INFO [RS:0;27c6fcd7dac8:37961 {}] regionserver.HRegionServer(3091): Received CLOSE for 410450476a93cc6ca0ae98001a6fdb48 2024-12-07T12:24:59,655 INFO [RS:0;27c6fcd7dac8:37961 {}] regionserver.HRegionServer(959): stopping server 27c6fcd7dac8,37961,1733574221909 2024-12-07T12:24:59,655 INFO [RS:0;27c6fcd7dac8:37961 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T12:24:59,655 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 5e39b00e357ba7c1ab410abfa44e6cc6, disabling compactions & flushes 2024-12-07T12:24:59,655 INFO [RS:0;27c6fcd7dac8:37961 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;27c6fcd7dac8:37961. 2024-12-07T12:24:59,655 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733574247085.5e39b00e357ba7c1ab410abfa44e6cc6. 
2024-12-07T12:24:59,655 DEBUG [RS:0;27c6fcd7dac8:37961 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T12:24:59,655 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733574247085.5e39b00e357ba7c1ab410abfa44e6cc6. 2024-12-07T12:24:59,655 DEBUG [RS:0;27c6fcd7dac8:37961 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:24:59,655 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733574247085.5e39b00e357ba7c1ab410abfa44e6cc6. after waiting 0 ms 2024-12-07T12:24:59,655 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733574247085.5e39b00e357ba7c1ab410abfa44e6cc6. 2024-12-07T12:24:59,655 INFO [RS:0;27c6fcd7dac8:37961 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T12:24:59,655 INFO [RS:0;27c6fcd7dac8:37961 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T12:24:59,655 INFO [RS:0;27c6fcd7dac8:37961 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-07T12:24:59,655 INFO [RS:0;27c6fcd7dac8:37961 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-07T12:24:59,656 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733574247085.5e39b00e357ba7c1ab410abfa44e6cc6.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/5e39b00e357ba7c1ab410abfa44e6cc6/info/768e89cb157e41b196e720fdebfc2fc4.613515ffd88b0d443fd5e64f291ab31e->hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/768e89cb157e41b196e720fdebfc2fc4-bottom] to archive 2024-12-07T12:24:59,656 INFO [RS:0;27c6fcd7dac8:37961 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-12-07T12:24:59,656 DEBUG [RS:0;27c6fcd7dac8:37961 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 5e39b00e357ba7c1ab410abfa44e6cc6=TestLogRolling-testLogRolling,,1733574247085.5e39b00e357ba7c1ab410abfa44e6cc6., 410450476a93cc6ca0ae98001a6fdb48=TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.} 2024-12-07T12:24:59,656 DEBUG [RS:0;27c6fcd7dac8:37961 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 410450476a93cc6ca0ae98001a6fdb48, 5e39b00e357ba7c1ab410abfa44e6cc6 2024-12-07T12:24:59,656 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T12:24:59,656 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T12:24:59,656 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T12:24:59,656 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T12:24:59,656 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T12:24:59,657 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733574247085.5e39b00e357ba7c1ab410abfa44e6cc6.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-07T12:24:59,659 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733574247085.5e39b00e357ba7c1ab410abfa44e6cc6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/5e39b00e357ba7c1ab410abfa44e6cc6/info/768e89cb157e41b196e720fdebfc2fc4.613515ffd88b0d443fd5e64f291ab31e to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/5e39b00e357ba7c1ab410abfa44e6cc6/info/768e89cb157e41b196e720fdebfc2fc4.613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:24:59,659 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733574247085.5e39b00e357ba7c1ab410abfa44e6cc6.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=27c6fcd7dac8:44847 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-12-07T12:24:59,659 WARN [StoreCloser-TestLogRolling-testLogRolling,,1733574247085.5e39b00e357ba7c1ab410abfa44e6cc6.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-12-07T12:24:59,661 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-12-07T12:24:59,662 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T12:24:59,662 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T12:24:59,662 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/5e39b00e357ba7c1ab410abfa44e6cc6/recovered.edits/131.seqid, newMaxSeqId=131, maxSeqId=126 2024-12-07T12:24:59,662 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733574299656Running coprocessor pre-close hooks at 1733574299656Disabling compacts and flushes for region at 1733574299656Disabling writes for close at 1733574299656Writing region close event to WAL at 1733574299658 (+2 ms)Running coprocessor post-close hooks at 1733574299662 (+4 ms)Closed at 1733574299662 2024-12-07T12:24:59,662 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T12:24:59,663 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733574247085.5e39b00e357ba7c1ab410abfa44e6cc6. 2024-12-07T12:24:59,663 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 5e39b00e357ba7c1ab410abfa44e6cc6: Waiting for close lock at 1733574299655Running coprocessor pre-close hooks at 1733574299655Disabling compacts and flushes for region at 1733574299655Disabling writes for close at 1733574299655Writing region close event to WAL at 1733574299659 (+4 ms)Running coprocessor post-close hooks at 1733574299662 (+3 ms)Closed at 1733574299662 2024-12-07T12:24:59,663 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1733574247085.5e39b00e357ba7c1ab410abfa44e6cc6. 
2024-12-07T12:24:59,663 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 410450476a93cc6ca0ae98001a6fdb48, disabling compactions & flushes 2024-12-07T12:24:59,663 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48. 2024-12-07T12:24:59,663 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48. 2024-12-07T12:24:59,663 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48. after waiting 0 ms 2024-12-07T12:24:59,663 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48. 2024-12-07T12:24:59,663 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/768e89cb157e41b196e720fdebfc2fc4.613515ffd88b0d443fd5e64f291ab31e->hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/613515ffd88b0d443fd5e64f291ab31e/info/768e89cb157e41b196e720fdebfc2fc4-top, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/TestLogRolling-testLogRolling=613515ffd88b0d443fd5e64f291ab31e-37b2c8f9ca294facbe1e53872672b854, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/TestLogRolling-testLogRolling=613515ffd88b0d443fd5e64f291ab31e-9e312fa3adda46e9a36e4b093a10eafa, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/82bf19b4c6f5417eb63cbb2d0485158d, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/TestLogRolling-testLogRolling=613515ffd88b0d443fd5e64f291ab31e-f1b8c5fcf1c7450fa437fb125ba647e2, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/defea9412c9d47749fb5396fe0d8c910, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/4081f4b2a07c4e7f842ff03f7c9495f8, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/c046258965b6425c9f84206d7761dd55, 
hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/c6122c7f288444b2b2047b889782a4f4, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/458000afc39b48cbb4b253c7ef8eb10c, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/25fda82864724a27a22c63a2ff1bd589, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/5009e3a2cb094f3b8d135a91f4d249fa, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/d50e4dc34e2f4a57a67ba71bfb5f1656, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/8ad2f142d47642bcab20e92b6161209d, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/1bdb0b78d2254cfe84cd18908313039c, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/4ea8e81340e94c4793ddba1dd9527642, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/81af9fd18eae423394fd24b68fc138b9, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/496fccd043a64ac68dc47acc771d1a34, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/d9c295f2c0d54811b9ec61d9003afa2a, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/9aa2aeb569bf405fa1bcae49f8ae8edf, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/e95eb312c2a24834abb6d727acac5763, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/37e64c1b5f994c3485bd20289d129004] to archive 2024-12-07T12:24:59,664 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-07T12:24:59,665 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/768e89cb157e41b196e720fdebfc2fc4.613515ffd88b0d443fd5e64f291ab31e to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/768e89cb157e41b196e720fdebfc2fc4.613515ffd88b0d443fd5e64f291ab31e 2024-12-07T12:24:59,666 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/TestLogRolling-testLogRolling=613515ffd88b0d443fd5e64f291ab31e-37b2c8f9ca294facbe1e53872672b854 to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/TestLogRolling-testLogRolling=613515ffd88b0d443fd5e64f291ab31e-37b2c8f9ca294facbe1e53872672b854 2024-12-07T12:24:59,667 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/TestLogRolling-testLogRolling=613515ffd88b0d443fd5e64f291ab31e-9e312fa3adda46e9a36e4b093a10eafa to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/TestLogRolling-testLogRolling=613515ffd88b0d443fd5e64f291ab31e-9e312fa3adda46e9a36e4b093a10eafa 2024-12-07T12:24:59,668 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/82bf19b4c6f5417eb63cbb2d0485158d to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/82bf19b4c6f5417eb63cbb2d0485158d 2024-12-07T12:24:59,669 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/TestLogRolling-testLogRolling=613515ffd88b0d443fd5e64f291ab31e-f1b8c5fcf1c7450fa437fb125ba647e2 to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/TestLogRolling-testLogRolling=613515ffd88b0d443fd5e64f291ab31e-f1b8c5fcf1c7450fa437fb125ba647e2 2024-12-07T12:24:59,670 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/defea9412c9d47749fb5396fe0d8c910 to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/defea9412c9d47749fb5396fe0d8c910 2024-12-07T12:24:59,671 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/4081f4b2a07c4e7f842ff03f7c9495f8 to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/4081f4b2a07c4e7f842ff03f7c9495f8 2024-12-07T12:24:59,672 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/c046258965b6425c9f84206d7761dd55 to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/c046258965b6425c9f84206d7761dd55 2024-12-07T12:24:59,673 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/c6122c7f288444b2b2047b889782a4f4 to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/c6122c7f288444b2b2047b889782a4f4 2024-12-07T12:24:59,674 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/458000afc39b48cbb4b253c7ef8eb10c to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/458000afc39b48cbb4b253c7ef8eb10c 2024-12-07T12:24:59,675 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/25fda82864724a27a22c63a2ff1bd589 to 
hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/25fda82864724a27a22c63a2ff1bd589 2024-12-07T12:24:59,676 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/5009e3a2cb094f3b8d135a91f4d249fa to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/5009e3a2cb094f3b8d135a91f4d249fa 2024-12-07T12:24:59,677 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/d50e4dc34e2f4a57a67ba71bfb5f1656 to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/d50e4dc34e2f4a57a67ba71bfb5f1656 2024-12-07T12:24:59,678 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/8ad2f142d47642bcab20e92b6161209d to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/8ad2f142d47642bcab20e92b6161209d 2024-12-07T12:24:59,679 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/1bdb0b78d2254cfe84cd18908313039c to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/1bdb0b78d2254cfe84cd18908313039c 2024-12-07T12:24:59,680 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/4ea8e81340e94c4793ddba1dd9527642 to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/4ea8e81340e94c4793ddba1dd9527642 2024-12-07T12:24:59,682 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/81af9fd18eae423394fd24b68fc138b9 to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/81af9fd18eae423394fd24b68fc138b9 2024-12-07T12:24:59,683 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/496fccd043a64ac68dc47acc771d1a34 to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/496fccd043a64ac68dc47acc771d1a34 2024-12-07T12:24:59,684 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/d9c295f2c0d54811b9ec61d9003afa2a to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/d9c295f2c0d54811b9ec61d9003afa2a 2024-12-07T12:24:59,686 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/9aa2aeb569bf405fa1bcae49f8ae8edf to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/9aa2aeb569bf405fa1bcae49f8ae8edf 2024-12-07T12:24:59,687 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/e95eb312c2a24834abb6d727acac5763 to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/e95eb312c2a24834abb6d727acac5763 2024-12-07T12:24:59,688 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/37e64c1b5f994c3485bd20289d129004 to hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/archive/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/info/37e64c1b5f994c3485bd20289d129004 2024-12-07T12:24:59,688 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48.-1 {}] 
regionserver.HStore(2414): Failed to report archival of files: [82bf19b4c6f5417eb63cbb2d0485158d=43081, defea9412c9d47749fb5396fe0d8c910=12516, 4081f4b2a07c4e7f842ff03f7c9495f8=66967, c046258965b6425c9f84206d7761dd55=21156, c6122c7f288444b2b2047b889782a4f4=16828, 458000afc39b48cbb4b253c7ef8eb10c=96252, 25fda82864724a27a22c63a2ff1bd589=22238, 5009e3a2cb094f3b8d135a91f4d249fa=20078, d50e4dc34e2f4a57a67ba71bfb5f1656=118996, 8ad2f142d47642bcab20e92b6161209d=12516, 1bdb0b78d2254cfe84cd18908313039c=29784, 4ea8e81340e94c4793ddba1dd9527642=151643, 81af9fd18eae423394fd24b68fc138b9=12516, 496fccd043a64ac68dc47acc771d1a34=22254, d9c295f2c0d54811b9ec61d9003afa2a=183044, 9aa2aeb569bf405fa1bcae49f8ae8edf=19013, e95eb312c2a24834abb6d727acac5763=23333, 37e64c1b5f994c3485bd20289d129004=19013] 2024-12-07T12:24:59,691 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/data/default/TestLogRolling-testLogRolling/410450476a93cc6ca0ae98001a6fdb48/recovered.edits/335.seqid, newMaxSeqId=335, maxSeqId=126 2024-12-07T12:24:59,692 INFO [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48. 2024-12-07T12:24:59,692 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 410450476a93cc6ca0ae98001a6fdb48: Waiting for close lock at 1733574299663Running coprocessor pre-close hooks at 1733574299663Disabling compacts and flushes for region at 1733574299663Disabling writes for close at 1733574299663Writing region close event to WAL at 1733574299688 (+25 ms)Running coprocessor post-close hooks at 1733574299692 (+4 ms)Closed at 1733574299692 2024-12-07T12:24:59,692 DEBUG [RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1733574247085.410450476a93cc6ca0ae98001a6fdb48. 2024-12-07T12:24:59,856 INFO [RS:0;27c6fcd7dac8:37961 {}] regionserver.HRegionServer(976): stopping server 27c6fcd7dac8,37961,1733574221909; all regions closed. 
2024-12-07T12:24:59,857 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:24:59,857 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:24:59,857 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:24:59,857 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:24:59,857 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:24:59,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741834_1010 (size=8107) 2024-12-07T12:24:59,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741834_1010 (size=8107) 2024-12-07T12:24:59,862 DEBUG [RS:0;27c6fcd7dac8:37961 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/oldWALs 2024-12-07T12:24:59,862 INFO [RS:0;27c6fcd7dac8:37961 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 27c6fcd7dac8%2C37961%2C1733574221909.meta:.meta(num 1733574222661) 2024-12-07T12:24:59,862 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:24:59,862 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:24:59,863 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:24:59,863 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:24:59,863 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:24:59,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741877_1053 (size=780) 2024-12-07T12:24:59,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741877_1053 (size=780) 2024-12-07T12:24:59,866 DEBUG [RS:0;27c6fcd7dac8:37961 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/oldWALs 2024-12-07T12:24:59,866 INFO [RS:0;27c6fcd7dac8:37961 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 27c6fcd7dac8%2C37961%2C1733574221909:(num 1733574299640) 2024-12-07T12:24:59,866 DEBUG [RS:0;27c6fcd7dac8:37961 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:24:59,867 INFO [RS:0;27c6fcd7dac8:37961 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T12:24:59,867 INFO [RS:0;27c6fcd7dac8:37961 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T12:24:59,867 INFO [RS:0;27c6fcd7dac8:37961 {}] hbase.ChoreService(370): Chore service for: regionserver/27c6fcd7dac8:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T12:24:59,867 INFO [RS:0;27c6fcd7dac8:37961 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T12:24:59,867 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T12:24:59,867 INFO [RS:0;27c6fcd7dac8:37961 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37961 2024-12-07T12:24:59,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T12:24:59,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1018ce14aa20001, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/27c6fcd7dac8,37961,1733574221909 2024-12-07T12:24:59,869 INFO [RS:0;27c6fcd7dac8:37961 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T12:24:59,870 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [27c6fcd7dac8,37961,1733574221909] 2024-12-07T12:24:59,871 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/27c6fcd7dac8,37961,1733574221909 already deleted, retry=false 2024-12-07T12:24:59,871 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 27c6fcd7dac8,37961,1733574221909 expired; onlineServers=0 2024-12-07T12:24:59,871 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '27c6fcd7dac8,44847,1733574221858' ***** 2024-12-07T12:24:59,871 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-07T12:24:59,871 INFO [M:0;27c6fcd7dac8:44847 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T12:24:59,871 INFO [M:0;27c6fcd7dac8:44847 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T12:24:59,871 DEBUG [M:0;27c6fcd7dac8:44847 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-07T12:24:59,872 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-07T12:24:59,872 DEBUG [M:0;27c6fcd7dac8:44847 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-07T12:24:59,872 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.small.0-1733574222064 {}] cleaner.HFileCleaner(306): Exit Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.small.0-1733574222064,5,FailOnTimeoutGroup] 2024-12-07T12:24:59,872 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.large.0-1733574222064 {}] cleaner.HFileCleaner(306): Exit Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.large.0-1733574222064,5,FailOnTimeoutGroup] 2024-12-07T12:24:59,872 INFO [M:0;27c6fcd7dac8:44847 {}] hbase.ChoreService(370): Chore service for: master/27c6fcd7dac8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-07T12:24:59,872 INFO [M:0;27c6fcd7dac8:44847 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T12:24:59,872 DEBUG [M:0;27c6fcd7dac8:44847 {}] master.HMaster(1795): Stopping service threads 2024-12-07T12:24:59,872 INFO [M:0;27c6fcd7dac8:44847 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-07T12:24:59,872 INFO [M:0;27c6fcd7dac8:44847 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T12:24:59,872 INFO [M:0;27c6fcd7dac8:44847 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-07T12:24:59,872 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-07T12:24:59,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T12:24:59,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:24:59,875 DEBUG [M:0;27c6fcd7dac8:44847 {}] zookeeper.ZKUtil(347): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T12:24:59,875 WARN [M:0;27c6fcd7dac8:44847 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T12:24:59,876 INFO [M:0;27c6fcd7dac8:44847 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/.lastflushedseqids 2024-12-07T12:24:59,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741878_1054 (size=228) 2024-12-07T12:24:59,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741878_1054 (size=228) 2024-12-07T12:24:59,881 INFO [M:0;27c6fcd7dac8:44847 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-07T12:24:59,881 INFO [M:0;27c6fcd7dac8:44847 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-07T12:24:59,881 DEBUG [M:0;27c6fcd7dac8:44847 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T12:24:59,881 INFO [M:0;27c6fcd7dac8:44847 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:24:59,881 DEBUG [M:0;27c6fcd7dac8:44847 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:24:59,881 DEBUG [M:0;27c6fcd7dac8:44847 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T12:24:59,881 DEBUG [M:0;27c6fcd7dac8:44847 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:24:59,881 INFO [M:0;27c6fcd7dac8:44847 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=53.70 KB heapSize=65.92 KB 2024-12-07T12:24:59,897 DEBUG [M:0;27c6fcd7dac8:44847 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d55ea5fcd6764d609d4bc10d20eb738c is 82, key is hbase:meta,,1/info:regioninfo/1733574222682/Put/seqid=0 2024-12-07T12:24:59,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741879_1055 (size=5672) 2024-12-07T12:24:59,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741879_1055 (size=5672) 2024-12-07T12:24:59,902 INFO [M:0;27c6fcd7dac8:44847 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d55ea5fcd6764d609d4bc10d20eb738c 2024-12-07T12:24:59,920 DEBUG [M:0;27c6fcd7dac8:44847 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0b72b21704ea42fa8358a5d87cd131b8 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733574223102/Put/seqid=0 2024-12-07T12:24:59,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741880_1056 (size=7680) 2024-12-07T12:24:59,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741880_1056 (size=7680) 2024-12-07T12:24:59,925 INFO [M:0;27c6fcd7dac8:44847 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.09 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0b72b21704ea42fa8358a5d87cd131b8 2024-12-07T12:24:59,929 INFO [M:0;27c6fcd7dac8:44847 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0b72b21704ea42fa8358a5d87cd131b8 2024-12-07T12:24:59,944 DEBUG [M:0;27c6fcd7dac8:44847 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cf4d9c8c53c44f2284c4f3e004ea5ed7 is 69, key is 27c6fcd7dac8,37961,1733574221909/rs:state/1733574222144/Put/seqid=0 2024-12-07T12:24:59,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741881_1057 (size=5156) 2024-12-07T12:24:59,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741881_1057 (size=5156) 2024-12-07T12:24:59,948 INFO [M:0;27c6fcd7dac8:44847 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cf4d9c8c53c44f2284c4f3e004ea5ed7 2024-12-07T12:24:59,966 DEBUG [M:0;27c6fcd7dac8:44847 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/efc381a5b2a64750ad458d1224e85cde is 52, key is load_balancer_on/state:d/1733574222735/Put/seqid=0 2024-12-07T12:24:59,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741882_1058 (size=5056) 2024-12-07T12:24:59,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741882_1058 (size=5056) 2024-12-07T12:24:59,970 INFO [RS:0;27c6fcd7dac8:37961 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T12:24:59,970 INFO [RS:0;27c6fcd7dac8:37961 {}] regionserver.HRegionServer(1031): Exiting; stopping=27c6fcd7dac8,37961,1733574221909; zookeeper connection closed. 
2024-12-07T12:24:59,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1018ce14aa20001, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:24:59,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37961-0x1018ce14aa20001, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:24:59,971 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4ba5498f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4ba5498f 2024-12-07T12:24:59,971 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-07T12:24:59,971 INFO [M:0;27c6fcd7dac8:44847 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/efc381a5b2a64750ad458d1224e85cde 2024-12-07T12:24:59,976 DEBUG [M:0;27c6fcd7dac8:44847 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d55ea5fcd6764d609d4bc10d20eb738c as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d55ea5fcd6764d609d4bc10d20eb738c 2024-12-07T12:24:59,979 INFO [M:0;27c6fcd7dac8:44847 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d55ea5fcd6764d609d4bc10d20eb738c, entries=8, sequenceid=129, filesize=5.5 K 2024-12-07T12:24:59,980 DEBUG [M:0;27c6fcd7dac8:44847 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0b72b21704ea42fa8358a5d87cd131b8 as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0b72b21704ea42fa8358a5d87cd131b8 2024-12-07T12:24:59,984 INFO [M:0;27c6fcd7dac8:44847 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0b72b21704ea42fa8358a5d87cd131b8 2024-12-07T12:24:59,984 INFO [M:0;27c6fcd7dac8:44847 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0b72b21704ea42fa8358a5d87cd131b8, entries=14, sequenceid=129, filesize=7.5 K 2024-12-07T12:24:59,985 DEBUG [M:0;27c6fcd7dac8:44847 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cf4d9c8c53c44f2284c4f3e004ea5ed7 as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cf4d9c8c53c44f2284c4f3e004ea5ed7 2024-12-07T12:24:59,989 INFO [M:0;27c6fcd7dac8:44847 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cf4d9c8c53c44f2284c4f3e004ea5ed7, entries=1, sequenceid=129, filesize=5.0 K 2024-12-07T12:24:59,990 DEBUG [M:0;27c6fcd7dac8:44847 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/efc381a5b2a64750ad458d1224e85cde as hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/efc381a5b2a64750ad458d1224e85cde 2024-12-07T12:24:59,994 INFO [M:0;27c6fcd7dac8:44847 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42271/user/jenkins/test-data/1e0a744c-3077-1455-c5fb-3ee6621adf52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/efc381a5b2a64750ad458d1224e85cde, entries=1, sequenceid=129, filesize=4.9 K 2024-12-07T12:24:59,995 INFO [M:0;27c6fcd7dac8:44847 {}] regionserver.HRegion(3140): Finished flush of dataSize ~53.70 KB/54985, heapSize ~65.86 KB/67440, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 113ms, sequenceid=129, compaction requested=false 2024-12-07T12:24:59,996 INFO [M:0;27c6fcd7dac8:44847 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:24:59,996 DEBUG [M:0;27c6fcd7dac8:44847 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733574299881Disabling compacts and flushes for region at 1733574299881Disabling writes for close at 1733574299881Obtaining lock to block concurrent updates at 1733574299881Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733574299881Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=54985, getHeapSize=67440, getOffHeapSize=0, getCellsCount=152 at 1733574299881Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733574299882 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733574299882Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733574299896 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733574299896Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733574299906 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733574299920 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733574299920Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733574299929 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733574299943 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733574299943Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733574299952 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733574299966 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733574299966Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@47eb727d: reopening flushed file at 1733574299975 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b4d4464: reopening flushed file at 1733574299980 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@63aec65e: reopening flushed file at 1733574299984 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15d33173: reopening flushed file at 1733574299989 (+5 ms)Finished flush of dataSize ~53.70 KB/54985, heapSize ~65.86 KB/67440, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 113ms, sequenceid=129, compaction requested=false at 1733574299995 (+6 ms)Writing region close event to WAL at 1733574299996 (+1 ms)Closed at 1733574299996 2024-12-07T12:24:59,996 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:24:59,996 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:24:59,997 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:24:59,997 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:24:59,997 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:24:59,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741830_1006 (size=63915) 2024-12-07T12:24:59,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41323 is added to blk_1073741830_1006 (size=63915) 2024-12-07T12:25:00,000 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T12:25:00,000 INFO [M:0;27c6fcd7dac8:44847 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-07T12:25:00,000 INFO [M:0;27c6fcd7dac8:44847 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44847 2024-12-07T12:25:00,000 INFO [M:0;27c6fcd7dac8:44847 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T12:25:00,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:25:00,102 INFO [M:0;27c6fcd7dac8:44847 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T12:25:00,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44847-0x1018ce14aa20000, quorum=127.0.0.1:55472, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T12:25:00,104 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@dcfcbff{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:25:00,105 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3dc4994c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:25:00,105 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:25:00,105 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2fc2e7d1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:25:00,105 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ab86f9f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/hadoop.log.dir/,STOPPED} 2024-12-07T12:25:00,106 WARN [BP-1023889294-172.17.0.2-1733574221188 heartbeating to localhost/127.0.0.1:42271 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T12:25:00,106 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T12:25:00,106 WARN [BP-1023889294-172.17.0.2-1733574221188 heartbeating to localhost/127.0.0.1:42271 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1023889294-172.17.0.2-1733574221188 (Datanode Uuid f387faad-8452-405f-ae97-7f76eee1d882) service to localhost/127.0.0.1:42271 2024-12-07T12:25:00,106 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T12:25:00,107 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/cluster_6dda650a-2883-97a9-0b81-69b13d0af14b/data/data4/current/BP-1023889294-172.17.0.2-1733574221188 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:25:00,107 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/cluster_6dda650a-2883-97a9-0b81-69b13d0af14b/data/data3/current/BP-1023889294-172.17.0.2-1733574221188 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:25:00,107 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T12:25:00,109 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@631c133{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:25:00,109 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3523e770{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:25:00,109 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:25:00,109 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60b9b83d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:25:00,109 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4bb23947{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/hadoop.log.dir/,STOPPED} 2024-12-07T12:25:00,110 WARN [BP-1023889294-172.17.0.2-1733574221188 heartbeating to localhost/127.0.0.1:42271 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T12:25:00,111 WARN [BP-1023889294-172.17.0.2-1733574221188 heartbeating to localhost/127.0.0.1:42271 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1023889294-172.17.0.2-1733574221188 (Datanode Uuid ab8ed10e-5026-4b52-9a3c-4b2a8f070b36) service to localhost/127.0.0.1:42271 2024-12-07T12:25:00,111 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T12:25:00,111 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T12:25:00,111 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/cluster_6dda650a-2883-97a9-0b81-69b13d0af14b/data/data1/current/BP-1023889294-172.17.0.2-1733574221188 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:25:00,111 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/cluster_6dda650a-2883-97a9-0b81-69b13d0af14b/data/data2/current/BP-1023889294-172.17.0.2-1733574221188 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T12:25:00,111 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T12:25:00,117 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76385d85{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T12:25:00,117 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1776bdc2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T12:25:00,117 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T12:25:00,117 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e1b4695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T12:25:00,117 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3def21d3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/hadoop.log.dir/,STOPPED} 2024-12-07T12:25:00,124 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-07T12:25:00,150 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-07T12:25:00,159 INFO [regionserver/27c6fcd7dac8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T12:25:00,162 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=228 (was 207) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42271 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42271 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42271 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially 
hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42271 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42271 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:42271 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42271 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42271 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42271 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=506 (was 483) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=14 (was 32), ProcessCount=11 (was 11), AvailableMemoryMB=6097 (was 6140) 2024-12-07T12:25:00,170 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=227, OpenFileDescriptor=506, MaxFileDescriptor=1048576, SystemLoadAverage=14, ProcessCount=11, AvailableMemoryMB=6097 2024-12-07T12:25:00,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-07T12:25:00,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/hadoop.log.dir so I do NOT create it in target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7 2024-12-07T12:25:00,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/af0c1146-9f3d-599c-6872-655041c5565a/hadoop.tmp.dir so I do NOT create it in target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7 2024-12-07T12:25:00,170 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/cluster_cd653442-7da3-75c2-d632-37373fbfed9e, deleteOnExit=true 2024-12-07T12:25:00,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-07T12:25:00,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/test.cache.data in system properties and HBase conf 2024-12-07T12:25:00,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T12:25:00,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/hadoop.log.dir in system properties and HBase conf 2024-12-07T12:25:00,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T12:25:00,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T12:25:00,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-07T12:25:00,171 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-12-07T12:25:00,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T12:25:00,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T12:25:00,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T12:25:00,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T12:25:00,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T12:25:00,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T12:25:00,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T12:25:00,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T12:25:00,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T12:25:00,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/nfs.dump.dir in system properties and HBase conf 2024-12-07T12:25:00,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/java.io.tmpdir in system properties and HBase conf
2024-12-07T12:25:00,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-07T12:25:00,172 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-07T12:25:00,172 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-07T12:25:00,184 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-07T12:25:00,245 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-07T12:25:00,249 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-07T12:25:00,252 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-07T12:25:00,252 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-07T12:25:00,252 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-07T12:25:00,253 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-07T12:25:00,253 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b72d363{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/hadoop.log.dir/,AVAILABLE}
2024-12-07T12:25:00,254 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c66b7d1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-07T12:25:00,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-07T12:25:00,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-07T12:25:00,367 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@61edd007{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/java.io.tmpdir/jetty-localhost-36719-hadoop-hdfs-3_4_1-tests_jar-_-any-7266304307120176006/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-07T12:25:00,368 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7d664f93{HTTP/1.1, (http/1.1)}{localhost:36719}
2024-12-07T12:25:00,368 INFO [Time-limited test {}] server.Server(415): Started @317786ms
2024-12-07T12:25:00,381 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-07T12:25:00,441 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-07T12:25:00,444 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-07T12:25:00,444 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-07T12:25:00,444 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-07T12:25:00,444 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-07T12:25:00,445 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fcd61c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/hadoop.log.dir/,AVAILABLE}
2024-12-07T12:25:00,445 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1040cecb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-07T12:25:00,558 INFO [Time-limited test {}] handler.ContextHandler(921): Started
o.e.j.w.WebAppContext@9fc2daa{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/java.io.tmpdir/jetty-localhost-43087-hadoop-hdfs-3_4_1-tests_jar-_-any-3820326716745485549/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:25:00,559 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2a1af98b{HTTP/1.1, (http/1.1)}{localhost:43087} 2024-12-07T12:25:00,559 INFO [Time-limited test {}] server.Server(415): Started @317977ms 2024-12-07T12:25:00,561 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T12:25:00,589 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T12:25:00,591 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T12:25:00,592 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T12:25:00,592 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T12:25:00,592 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T12:25:00,593 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49e6dd92{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/hadoop.log.dir/,AVAILABLE} 2024-12-07T12:25:00,593 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6bfe0bbd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T12:25:00,657 WARN [Thread-2497 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/cluster_cd653442-7da3-75c2-d632-37373fbfed9e/data/data1/current/BP-1998150225-172.17.0.2-1733574300190/current, will proceed with Du for space computation calculation, 2024-12-07T12:25:00,657 WARN [Thread-2498 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/cluster_cd653442-7da3-75c2-d632-37373fbfed9e/data/data2/current/BP-1998150225-172.17.0.2-1733574300190/current, will proceed with Du for space computation calculation, 2024-12-07T12:25:00,673 WARN [Thread-2476 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T12:25:00,675 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9904698e6c1f3bc8 with lease ID 0xdd5bbf4e264ad708: Processing first storage report for DS-fb5b0027-56b9-439e-bfbd-aea408c97c7f from datanode DatanodeRegistration(127.0.0.1:34299, datanodeUuid=24639138-e2c1-4e12-a22e-7872fd0cba6a, infoPort=37901, infoSecurePort=0, ipcPort=35099, storageInfo=lv=-57;cid=testClusterID;nsid=1555453148;c=1733574300190) 2024-12-07T12:25:00,675 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9904698e6c1f3bc8 with lease ID 0xdd5bbf4e264ad708: from storage DS-fb5b0027-56b9-439e-bfbd-aea408c97c7f node DatanodeRegistration(127.0.0.1:34299, datanodeUuid=24639138-e2c1-4e12-a22e-7872fd0cba6a, infoPort=37901, infoSecurePort=0, ipcPort=35099, storageInfo=lv=-57;cid=testClusterID;nsid=1555453148;c=1733574300190), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:25:00,675 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9904698e6c1f3bc8 with lease ID 0xdd5bbf4e264ad708: Processing first storage report for DS-b0c617f8-cc72-4ba6-9580-c60522329a49 from datanode DatanodeRegistration(127.0.0.1:34299, datanodeUuid=24639138-e2c1-4e12-a22e-7872fd0cba6a, infoPort=37901, infoSecurePort=0, ipcPort=35099, storageInfo=lv=-57;cid=testClusterID;nsid=1555453148;c=1733574300190) 2024-12-07T12:25:00,675 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9904698e6c1f3bc8 with lease ID 0xdd5bbf4e264ad708: from storage DS-b0c617f8-cc72-4ba6-9580-c60522329a49 node DatanodeRegistration(127.0.0.1:34299, datanodeUuid=24639138-e2c1-4e12-a22e-7872fd0cba6a, infoPort=37901, infoSecurePort=0, ipcPort=35099, storageInfo=lv=-57;cid=testClusterID;nsid=1555453148;c=1733574300190), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:25:00,715 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@627a202d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/java.io.tmpdir/jetty-localhost-33939-hadoop-hdfs-3_4_1-tests_jar-_-any-13489647149833923045/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T12:25:00,715 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22ed154c{HTTP/1.1, (http/1.1)}{localhost:33939} 2024-12-07T12:25:00,716 INFO [Time-limited test {}] server.Server(415): Started @318133ms 2024-12-07T12:25:00,717 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-07T12:25:00,818 WARN [Thread-2523 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/cluster_cd653442-7da3-75c2-d632-37373fbfed9e/data/data3/current/BP-1998150225-172.17.0.2-1733574300190/current, will proceed with Du for space computation calculation, 2024-12-07T12:25:00,818 WARN [Thread-2524 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/cluster_cd653442-7da3-75c2-d632-37373fbfed9e/data/data4/current/BP-1998150225-172.17.0.2-1733574300190/current, will proceed with Du for space computation calculation, 2024-12-07T12:25:00,835 WARN [Thread-2512 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T12:25:00,837 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc0274630931d023a with lease ID 0xdd5bbf4e264ad709: Processing first storage report for DS-34678e8f-c7f4-4a90-8ce4-6d323f8258e8 from datanode DatanodeRegistration(127.0.0.1:34809, datanodeUuid=b6fdde4f-54c7-4827-81bd-9894087f8d9b, infoPort=39999, infoSecurePort=0, ipcPort=37209, storageInfo=lv=-57;cid=testClusterID;nsid=1555453148;c=1733574300190) 2024-12-07T12:25:00,837 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc0274630931d023a with lease ID 0xdd5bbf4e264ad709: from storage DS-34678e8f-c7f4-4a90-8ce4-6d323f8258e8 node DatanodeRegistration(127.0.0.1:34809, datanodeUuid=b6fdde4f-54c7-4827-81bd-9894087f8d9b, infoPort=39999, infoSecurePort=0, ipcPort=37209, storageInfo=lv=-57;cid=testClusterID;nsid=1555453148;c=1733574300190), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:25:00,837 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc0274630931d023a with lease ID 0xdd5bbf4e264ad709: Processing first storage report for DS-8615e3e5-ceac-4238-9115-6dd11ca05190 from datanode DatanodeRegistration(127.0.0.1:34809, datanodeUuid=b6fdde4f-54c7-4827-81bd-9894087f8d9b, infoPort=39999, infoSecurePort=0, ipcPort=37209, storageInfo=lv=-57;cid=testClusterID;nsid=1555453148;c=1733574300190) 2024-12-07T12:25:00,837 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc0274630931d023a with lease ID 0xdd5bbf4e264ad709: from storage DS-8615e3e5-ceac-4238-9115-6dd11ca05190 node DatanodeRegistration(127.0.0.1:34809, datanodeUuid=b6fdde4f-54c7-4827-81bd-9894087f8d9b, infoPort=39999, infoSecurePort=0, ipcPort=37209, storageInfo=lv=-57;cid=testClusterID;nsid=1555453148;c=1733574300190), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T12:25:00,841 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7 2024-12-07T12:25:00,844 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/cluster_cd653442-7da3-75c2-d632-37373fbfed9e/zookeeper_0, clientPort=60080, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/cluster_cd653442-7da3-75c2-d632-37373fbfed9e/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/cluster_cd653442-7da3-75c2-d632-37373fbfed9e/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-07T12:25:00,844 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60080 2024-12-07T12:25:00,845 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:25:00,846 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:25:00,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34299 is added to blk_1073741825_1001 (size=7) 2024-12-07T12:25:00,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34809 is added to blk_1073741825_1001 (size=7) 2024-12-07T12:25:00,856 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33 with version=8 2024-12-07T12:25:00,856 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35545/user/jenkins/test-data/644bca52-7933-032e-fad9-69210cff361e/hbase-staging 2024-12-07T12:25:00,858 INFO [Time-limited test {}] client.ConnectionUtils(128): master/27c6fcd7dac8:0 server-side Connection retries=45 2024-12-07T12:25:00,858 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:25:00,858 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T12:25:00,858 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T12:25:00,858 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:25:00,858 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T12:25:00,858 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-07T12:25:00,859 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T12:25:00,859 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45431 2024-12-07T12:25:00,860 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45431 connecting to ZooKeeper ensemble=127.0.0.1:60080 2024-12-07T12:25:00,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:454310x0, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T12:25:00,872 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45431-0x1018ce27f3b0000 connected 2024-12-07T12:25:00,887 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:25:00,888 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:25:00,890 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:25:00,890 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33, hbase.cluster.distributed=false 2024-12-07T12:25:00,892 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T12:25:00,892 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45431 2024-12-07T12:25:00,892 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45431 2024-12-07T12:25:00,893 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45431 2024-12-07T12:25:00,893 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45431 2024-12-07T12:25:00,893 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45431 2024-12-07T12:25:00,910 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/27c6fcd7dac8:0 server-side Connection retries=45 2024-12-07T12:25:00,910 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:25:00,910 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T12:25:00,910 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T12:25:00,910 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T12:25:00,910 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T12:25:00,910 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T12:25:00,910 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T12:25:00,911 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34755 2024-12-07T12:25:00,912 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34755 connecting to ZooKeeper ensemble=127.0.0.1:60080 2024-12-07T12:25:00,912 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:25:00,914 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:25:00,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:347550x0, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T12:25:00,918 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:347550x0, quorum=127.0.0.1:60080, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:25:00,918 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34755-0x1018ce27f3b0001 connected 2024-12-07T12:25:00,918 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T12:25:00,919 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T12:25:00,919 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34755-0x1018ce27f3b0001, quorum=127.0.0.1:60080, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T12:25:00,920 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34755-0x1018ce27f3b0001, quorum=127.0.0.1:60080, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T12:25:00,921 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34755 2024-12-07T12:25:00,921 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34755 2024-12-07T12:25:00,921 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34755 2024-12-07T12:25:00,921 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34755 2024-12-07T12:25:00,921 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34755 2024-12-07T12:25:00,933 
DEBUG [M:0;27c6fcd7dac8:45431 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;27c6fcd7dac8:45431 2024-12-07T12:25:00,933 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/27c6fcd7dac8,45431,1733574300858 2024-12-07T12:25:00,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:25:00,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34755-0x1018ce27f3b0001, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:25:00,935 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/27c6fcd7dac8,45431,1733574300858 2024-12-07T12:25:00,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34755-0x1018ce27f3b0001, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T12:25:00,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34755-0x1018ce27f3b0001, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:25:00,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:25:00,938 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T12:25:00,938 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/27c6fcd7dac8,45431,1733574300858 from backup master directory 2024-12-07T12:25:00,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/27c6fcd7dac8,45431,1733574300858 2024-12-07T12:25:00,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:25:00,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34755-0x1018ce27f3b0001, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T12:25:00,939 WARN [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-07T12:25:00,939 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=27c6fcd7dac8,45431,1733574300858 2024-12-07T12:25:00,943 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/hbase.id] with ID: 0d0c0b6f-ae56-459d-8f95-a89facc2a6e9 2024-12-07T12:25:00,943 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/.tmp/hbase.id 2024-12-07T12:25:00,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34299 is added to blk_1073741826_1002 (size=42) 2024-12-07T12:25:00,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34809 is added to blk_1073741826_1002 (size=42) 2024-12-07T12:25:00,949 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/.tmp/hbase.id]:[hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/hbase.id] 2024-12-07T12:25:00,958 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:25:00,958 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-07T12:25:00,959 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-07T12:25:00,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:25:00,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34755-0x1018ce27f3b0001, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:25:00,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34809 is added to blk_1073741827_1003 (size=196) 2024-12-07T12:25:00,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34299 is added to blk_1073741827_1003 (size=196) 2024-12-07T12:25:00,967 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T12:25:00,967 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T12:25:00,968 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T12:25:00,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34809 is added to blk_1073741828_1004 (size=1189) 2024-12-07T12:25:00,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34299 is added to blk_1073741828_1004 (size=1189) 2024-12-07T12:25:00,974 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/data/master/store 2024-12-07T12:25:00,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34299 is added to blk_1073741829_1005 (size=34) 2024-12-07T12:25:00,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34809 is added to blk_1073741829_1005 (size=34) 2024-12-07T12:25:00,984 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:25:00,985 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T12:25:00,985 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:25:00,985 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:25:00,985 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T12:25:00,985 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:25:00,985 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-07T12:25:00,985 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733574300985Disabling compacts and flushes for region at 1733574300985Disabling writes for close at 1733574300985Writing region close event to WAL at 1733574300985Closed at 1733574300985 2024-12-07T12:25:00,985 WARN [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/data/master/store/.initializing 2024-12-07T12:25:00,985 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/WALs/27c6fcd7dac8,45431,1733574300858 2024-12-07T12:25:00,987 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=27c6fcd7dac8%2C45431%2C1733574300858, suffix=, logDir=hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/WALs/27c6fcd7dac8,45431,1733574300858, archiveDir=hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/oldWALs, maxLogs=10 2024-12-07T12:25:00,988 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C45431%2C1733574300858.1733574300988 2024-12-07T12:25:00,993 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/WALs/27c6fcd7dac8,45431,1733574300858/27c6fcd7dac8%2C45431%2C1733574300858.1733574300988 2024-12-07T12:25:00,994 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39999:39999),(127.0.0.1/127.0.0.1:37901:37901)] 2024-12-07T12:25:00,995 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-07T12:25:00,995 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:25:00,995 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:25:00,995 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:25:00,996 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:25:00,997 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T12:25:00,997 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:25:00,998 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:25:00,998 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:25:00,999 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T12:25:00,999 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:25:00,999 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:25:00,999 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:25:01,000 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T12:25:01,000 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:25:01,001 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:25:01,001 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:25:01,002 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T12:25:01,002 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:25:01,002 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T12:25:01,002 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:25:01,003 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:25:01,003 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:25:01,004 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:25:01,004 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:25:01,004 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T12:25:01,005 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T12:25:01,007 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:25:01,007 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=798746, jitterRate=0.015658095479011536}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T12:25:01,008 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733574300995Initializing all the Stores at 1733574300996 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574300996Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574300996Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574300996Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574300996Cleaning up temporary data from old regions at 1733574301004 (+8 ms)Region opened successfully at 1733574301008 (+4 ms) 2024-12-07T12:25:01,008 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-07T12:25:01,010 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@280286ac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=27c6fcd7dac8/172.17.0.2:0 2024-12-07T12:25:01,011 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-07T12:25:01,011 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T12:25:01,011 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T12:25:01,011 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T12:25:01,012 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-07T12:25:01,012 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-07T12:25:01,012 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T12:25:01,014 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-07T12:25:01,015 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T12:25:01,016 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-07T12:25:01,016 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T12:25:01,017 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T12:25:01,019 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-07T12:25:01,019 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T12:25:01,020 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T12:25:01,021 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-07T12:25:01,021 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T12:25:01,022 DEBUG 
[master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T12:25:01,024 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T12:25:01,025 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T12:25:01,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T12:25:01,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34755-0x1018ce27f3b0001, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T12:25:01,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34755-0x1018ce27f3b0001, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:25:01,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:25:01,028 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=27c6fcd7dac8,45431,1733574300858, sessionid=0x1018ce27f3b0000, setting cluster-up flag (Was=false) 2024-12-07T12:25:01,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:25:01,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34755-0x1018ce27f3b0001, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:25:01,035 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T12:25:01,035 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=27c6fcd7dac8,45431,1733574300858 2024-12-07T12:25:01,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34755-0x1018ce27f3b0001, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:25:01,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:25:01,042 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T12:25:01,043 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=27c6fcd7dac8,45431,1733574300858 2024-12-07T12:25:01,044 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-07T12:25:01,045 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-07T12:25:01,046 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-07T12:25:01,046 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-07T12:25:01,046 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 27c6fcd7dac8,45431,1733574300858 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T12:25:01,047 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:25:01,047 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:25:01,047 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:25:01,047 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=5, maxPoolSize=5 2024-12-07T12:25:01,047 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/27c6fcd7dac8:0, corePoolSize=10, maxPoolSize=10 2024-12-07T12:25:01,047 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:25:01,047 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=2, maxPoolSize=2 2024-12-07T12:25:01,047 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/27c6fcd7dac8:0, corePoolSize=1, 
maxPoolSize=1 2024-12-07T12:25:01,048 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733574331048 2024-12-07T12:25:01,048 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-07T12:25:01,048 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T12:25:01,048 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T12:25:01,048 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T12:25:01,048 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T12:25:01,048 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T12:25:01,048 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T12:25:01,048 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-07T12:25:01,048 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-07T12:25:01,049 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T12:25:01,049 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T12:25:01,049 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T12:25:01,049 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T12:25:01,049 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T12:25:01,049 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:25:01,049 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.large.0-1733574301049,5,FailOnTimeoutGroup] 2024-12-07T12:25:01,049 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T12:25:01,050 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.small.0-1733574301049,5,FailOnTimeoutGroup] 2024-12-07T12:25:01,050 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T12:25:01,050 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-07T12:25:01,050 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-07T12:25:01,050 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-07T12:25:01,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34299 is added to blk_1073741831_1007 (size=1321) 2024-12-07T12:25:01,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34809 is added to blk_1073741831_1007 (size=1321) 2024-12-07T12:25:01,055 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-07T12:25:01,056 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33 2024-12-07T12:25:01,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34809 is added to blk_1073741832_1008 (size=32) 2024-12-07T12:25:01,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34299 is added to blk_1073741832_1008 (size=32) 2024-12-07T12:25:01,061 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:25:01,062 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T12:25:01,062 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T12:25:01,063 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:25:01,063 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:25:01,063 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T12:25:01,064 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T12:25:01,064 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:25:01,064 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:25:01,064 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T12:25:01,065 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T12:25:01,065 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:25:01,065 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:25:01,065 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T12:25:01,066 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T12:25:01,066 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:25:01,066 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:25:01,067 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T12:25:01,067 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/data/hbase/meta/1588230740 2024-12-07T12:25:01,067 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/data/hbase/meta/1588230740 2024-12-07T12:25:01,068 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T12:25:01,068 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T12:25:01,068 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T12:25:01,069 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T12:25:01,070 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T12:25:01,071 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=851055, jitterRate=0.08217307925224304}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T12:25:01,071 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733574301061Initializing all the Stores at 1733574301061Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574301061Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574301061Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574301061Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574301061Cleaning up temporary data from old regions at 1733574301068 (+7 ms)Region opened successfully at 1733574301071 (+3 ms) 2024-12-07T12:25:01,072 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T12:25:01,072 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T12:25:01,072 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T12:25:01,072 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T12:25:01,072 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T12:25:01,072 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T12:25:01,072 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733574301072Disabling compacts and flushes for region at 
1733574301072Disabling writes for close at 1733574301072Writing region close event to WAL at 1733574301072Closed at 1733574301072 2024-12-07T12:25:01,073 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T12:25:01,073 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-07T12:25:01,074 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T12:25:01,075 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T12:25:01,075 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T12:25:01,123 INFO [RS:0;27c6fcd7dac8:34755 {}] regionserver.HRegionServer(746): ClusterId : 0d0c0b6f-ae56-459d-8f95-a89facc2a6e9 2024-12-07T12:25:01,123 DEBUG [RS:0;27c6fcd7dac8:34755 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T12:25:01,125 DEBUG [RS:0;27c6fcd7dac8:34755 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T12:25:01,125 DEBUG [RS:0;27c6fcd7dac8:34755 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T12:25:01,128 DEBUG [RS:0;27c6fcd7dac8:34755 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T12:25:01,129 DEBUG [RS:0;27c6fcd7dac8:34755 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@144fbde5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=27c6fcd7dac8/172.17.0.2:0 2024-12-07T12:25:01,140 DEBUG [RS:0;27c6fcd7dac8:34755 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;27c6fcd7dac8:34755 2024-12-07T12:25:01,141 INFO [RS:0;27c6fcd7dac8:34755 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T12:25:01,141 INFO [RS:0;27c6fcd7dac8:34755 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T12:25:01,141 DEBUG [RS:0;27c6fcd7dac8:34755 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-07T12:25:01,141 INFO [RS:0;27c6fcd7dac8:34755 {}] regionserver.HRegionServer(2659): reportForDuty to master=27c6fcd7dac8,45431,1733574300858 with port=34755, startcode=1733574300909 2024-12-07T12:25:01,141 DEBUG [RS:0;27c6fcd7dac8:34755 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T12:25:01,143 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59005, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T12:25:01,144 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45431 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 27c6fcd7dac8,34755,1733574300909 2024-12-07T12:25:01,144 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45431 {}] master.ServerManager(517): Registering regionserver=27c6fcd7dac8,34755,1733574300909 2024-12-07T12:25:01,145 DEBUG [RS:0;27c6fcd7dac8:34755 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33 2024-12-07T12:25:01,145 DEBUG [RS:0;27c6fcd7dac8:34755 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39881 2024-12-07T12:25:01,145 DEBUG [RS:0;27c6fcd7dac8:34755 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T12:25:01,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T12:25:01,147 DEBUG [RS:0;27c6fcd7dac8:34755 {}] zookeeper.ZKUtil(111): regionserver:34755-0x1018ce27f3b0001, quorum=127.0.0.1:60080, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/27c6fcd7dac8,34755,1733574300909 2024-12-07T12:25:01,147 WARN [RS:0;27c6fcd7dac8:34755 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T12:25:01,147 INFO [RS:0;27c6fcd7dac8:34755 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T12:25:01,147 DEBUG [RS:0;27c6fcd7dac8:34755 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/WALs/27c6fcd7dac8,34755,1733574300909 2024-12-07T12:25:01,147 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [27c6fcd7dac8,34755,1733574300909] 2024-12-07T12:25:01,150 INFO [RS:0;27c6fcd7dac8:34755 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T12:25:01,152 INFO [RS:0;27c6fcd7dac8:34755 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T12:25:01,152 INFO [RS:0;27c6fcd7dac8:34755 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T12:25:01,152 INFO [RS:0;27c6fcd7dac8:34755 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-07T12:25:01,152 INFO [RS:0;27c6fcd7dac8:34755 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T12:25:01,153 INFO [RS:0;27c6fcd7dac8:34755 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T12:25:01,153 INFO [RS:0;27c6fcd7dac8:34755 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T12:25:01,153 DEBUG [RS:0;27c6fcd7dac8:34755 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:25:01,153 DEBUG [RS:0;27c6fcd7dac8:34755 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:25:01,153 DEBUG [RS:0;27c6fcd7dac8:34755 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:25:01,153 DEBUG [RS:0;27c6fcd7dac8:34755 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:25:01,153 DEBUG [RS:0;27c6fcd7dac8:34755 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:25:01,153 DEBUG [RS:0;27c6fcd7dac8:34755 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/27c6fcd7dac8:0, corePoolSize=2, maxPoolSize=2 2024-12-07T12:25:01,153 DEBUG [RS:0;27c6fcd7dac8:34755 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:25:01,153 DEBUG [RS:0;27c6fcd7dac8:34755 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:25:01,153 DEBUG [RS:0;27c6fcd7dac8:34755 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:25:01,153 DEBUG [RS:0;27c6fcd7dac8:34755 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:25:01,153 DEBUG [RS:0;27c6fcd7dac8:34755 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:25:01,153 DEBUG [RS:0;27c6fcd7dac8:34755 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/27c6fcd7dac8:0, corePoolSize=1, maxPoolSize=1 2024-12-07T12:25:01,153 DEBUG [RS:0;27c6fcd7dac8:34755 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/27c6fcd7dac8:0, corePoolSize=3, maxPoolSize=3 2024-12-07T12:25:01,153 DEBUG [RS:0;27c6fcd7dac8:34755 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/27c6fcd7dac8:0, corePoolSize=3, maxPoolSize=3 2024-12-07T12:25:01,154 INFO [RS:0;27c6fcd7dac8:34755 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-07T12:25:01,154 INFO [RS:0;27c6fcd7dac8:34755 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T12:25:01,154 INFO [RS:0;27c6fcd7dac8:34755 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:25:01,154 INFO [RS:0;27c6fcd7dac8:34755 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T12:25:01,154 INFO [RS:0;27c6fcd7dac8:34755 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T12:25:01,154 INFO [RS:0;27c6fcd7dac8:34755 {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,34755,1733574300909-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T12:25:01,168 INFO [RS:0;27c6fcd7dac8:34755 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T12:25:01,168 INFO [RS:0;27c6fcd7dac8:34755 {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,34755,1733574300909-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:25:01,168 INFO [RS:0;27c6fcd7dac8:34755 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:25:01,168 INFO [RS:0;27c6fcd7dac8:34755 {}] regionserver.Replication(171): 27c6fcd7dac8,34755,1733574300909 started 2024-12-07T12:25:01,182 INFO [RS:0;27c6fcd7dac8:34755 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:25:01,182 INFO [RS:0;27c6fcd7dac8:34755 {}] regionserver.HRegionServer(1482): Serving as 27c6fcd7dac8,34755,1733574300909, RpcServer on 27c6fcd7dac8/172.17.0.2:34755, sessionid=0x1018ce27f3b0001 2024-12-07T12:25:01,182 DEBUG [RS:0;27c6fcd7dac8:34755 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T12:25:01,182 DEBUG [RS:0;27c6fcd7dac8:34755 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 27c6fcd7dac8,34755,1733574300909 2024-12-07T12:25:01,182 DEBUG [RS:0;27c6fcd7dac8:34755 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '27c6fcd7dac8,34755,1733574300909' 2024-12-07T12:25:01,182 DEBUG [RS:0;27c6fcd7dac8:34755 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T12:25:01,183 DEBUG [RS:0;27c6fcd7dac8:34755 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T12:25:01,183 DEBUG [RS:0;27c6fcd7dac8:34755 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T12:25:01,183 DEBUG [RS:0;27c6fcd7dac8:34755 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T12:25:01,183 DEBUG [RS:0;27c6fcd7dac8:34755 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 27c6fcd7dac8,34755,1733574300909 2024-12-07T12:25:01,183 DEBUG [RS:0;27c6fcd7dac8:34755 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '27c6fcd7dac8,34755,1733574300909' 2024-12-07T12:25:01,183 DEBUG [RS:0;27c6fcd7dac8:34755 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T12:25:01,183 DEBUG 
[RS:0;27c6fcd7dac8:34755 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T12:25:01,184 DEBUG [RS:0;27c6fcd7dac8:34755 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T12:25:01,184 INFO [RS:0;27c6fcd7dac8:34755 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T12:25:01,184 INFO [RS:0;27c6fcd7dac8:34755 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T12:25:01,226 WARN [27c6fcd7dac8:45431 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-07T12:25:01,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,46855,1733574087478/27c6fcd7dac8%2C46855%2C1733574087478.meta.1733574088764.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:25:01,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45085/user/jenkins/test-data/9e5b6cb7-68c1-bd90-ef8f-5df41dbe53c3/WALs/27c6fcd7dac8,37667,1733574088962/27c6fcd7dac8%2C37667%2C1733574088962.1733574089197 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T12:25:01,285 INFO [RS:0;27c6fcd7dac8:34755 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=27c6fcd7dac8%2C34755%2C1733574300909, suffix=, logDir=hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/WALs/27c6fcd7dac8,34755,1733574300909, archiveDir=hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/oldWALs, maxLogs=32 2024-12-07T12:25:01,286 INFO [RS:0;27c6fcd7dac8:34755 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C34755%2C1733574300909.1733574301286 2024-12-07T12:25:01,291 INFO [RS:0;27c6fcd7dac8:34755 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/WALs/27c6fcd7dac8,34755,1733574300909/27c6fcd7dac8%2C34755%2C1733574300909.1733574301286 2024-12-07T12:25:01,291 DEBUG [RS:0;27c6fcd7dac8:34755 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37901:37901),(127.0.0.1/127.0.0.1:39999:39999)] 2024-12-07T12:25:01,476 DEBUG [27c6fcd7dac8:45431 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-07T12:25:01,476 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=27c6fcd7dac8,34755,1733574300909 2024-12-07T12:25:01,478 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 27c6fcd7dac8,34755,1733574300909, state=OPENING 2024-12-07T12:25:01,479 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T12:25:01,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:34755-0x1018ce27f3b0001, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:25:01,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:25:01,481 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:25:01,481 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:25:01,481 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T12:25:01,481 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=27c6fcd7dac8,34755,1733574300909}] 2024-12-07T12:25:01,633 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T12:25:01,635 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47171, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T12:25:01,638 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-07T12:25:01,638 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T12:25:01,639 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=27c6fcd7dac8%2C34755%2C1733574300909.meta, suffix=.meta, logDir=hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/WALs/27c6fcd7dac8,34755,1733574300909, archiveDir=hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/oldWALs, maxLogs=32 2024-12-07T12:25:01,640 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 27c6fcd7dac8%2C34755%2C1733574300909.meta.1733574301640.meta 2024-12-07T12:25:01,644 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/WALs/27c6fcd7dac8,34755,1733574300909/27c6fcd7dac8%2C34755%2C1733574300909.meta.1733574301640.meta 2024-12-07T12:25:01,652 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37901:37901),(127.0.0.1/127.0.0.1:39999:39999)] 2024-12-07T12:25:01,656 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 
2024-12-07T12:25:01,657 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T12:25:01,657 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T12:25:01,657 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-07T12:25:01,657 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T12:25:01,657 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T12:25:01,657 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-07T12:25:01,657 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-07T12:25:01,658 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T12:25:01,659 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T12:25:01,659 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:25:01,659 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:25:01,659 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T12:25:01,660 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T12:25:01,660 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:25:01,660 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:25:01,660 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T12:25:01,661 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T12:25:01,661 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:25:01,661 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:25:01,661 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T12:25:01,662 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T12:25:01,662 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T12:25:01,662 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T12:25:01,662 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T12:25:01,663 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/data/hbase/meta/1588230740 2024-12-07T12:25:01,664 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/data/hbase/meta/1588230740 2024-12-07T12:25:01,665 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T12:25:01,665 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T12:25:01,665 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-07T12:25:01,666 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T12:25:01,667 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=813406, jitterRate=0.03430025279521942}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T12:25:01,667 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-07T12:25:01,680 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733574301657Writing region info on filesystem at 1733574301657Initializing all the Stores at 1733574301658 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574301658Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574301658Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733574301658Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733574301658Cleaning up temporary data from old regions at 1733574301665 (+7 ms)Running coprocessor post-open hooks at 1733574301667 (+2 ms)Region opened successfully at 1733574301680 (+13 ms) 2024-12-07T12:25:01,681 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733574301633 2024-12-07T12:25:01,683 DEBUG [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T12:25:01,683 INFO [RS_OPEN_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-07T12:25:01,684 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=27c6fcd7dac8,34755,1733574300909 2024-12-07T12:25:01,685 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 27c6fcd7dac8,34755,1733574300909, state=OPEN 2024-12-07T12:25:01,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T12:25:01,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34755-0x1018ce27f3b0001, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T12:25:01,693 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=27c6fcd7dac8,34755,1733574300909 2024-12-07T12:25:01,693 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:25:01,693 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T12:25:01,695 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-07T12:25:01,695 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=27c6fcd7dac8,34755,1733574300909 in 212 msec 2024-12-07T12:25:01,698 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-07T12:25:01,698 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 622 msec 2024-12-07T12:25:01,698 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T12:25:01,698 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-07T12:25:01,699 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T12:25:01,699 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=27c6fcd7dac8,34755,1733574300909, seqNum=-1] 2024-12-07T12:25:01,700 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T12:25:01,701 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59753, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T12:25:01,705 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 659 msec 2024-12-07T12:25:01,705 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733574301705, completionTime=-1 2024-12-07T12:25:01,705 INFO 
[master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-07T12:25:01,705 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-07T12:25:01,706 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-07T12:25:01,706 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733574361706 2024-12-07T12:25:01,706 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733574421706 2024-12-07T12:25:01,707 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-07T12:25:01,707 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,45431,1733574300858-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T12:25:01,707 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,45431,1733574300858-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:25:01,707 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,45431,1733574300858-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:25:01,707 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-27c6fcd7dac8:45431, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:25:01,707 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T12:25:01,707 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-07T12:25:01,709 DEBUG [master/27c6fcd7dac8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-07T12:25:01,711 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.771sec 2024-12-07T12:25:01,711 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T12:25:01,711 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T12:25:01,711 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T12:25:01,711 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-07T12:25:01,711 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T12:25:01,711 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,45431,1733574300858-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T12:25:01,711 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,45431,1733574300858-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T12:25:01,713 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-07T12:25:01,713 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T12:25:01,713 INFO [master/27c6fcd7dac8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=27c6fcd7dac8,45431,1733574300858-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T12:25:01,723 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4457fb1e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T12:25:01,723 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 27c6fcd7dac8,45431,-1 for getting cluster id 2024-12-07T12:25:01,723 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-07T12:25:01,725 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0d0c0b6f-ae56-459d-8f95-a89facc2a6e9' 2024-12-07T12:25:01,725 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-07T12:25:01,725 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0d0c0b6f-ae56-459d-8f95-a89facc2a6e9" 2024-12-07T12:25:01,725 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e7671da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T12:25:01,725 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [27c6fcd7dac8,45431,-1] 2024-12-07T12:25:01,725 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-07T12:25:01,726 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:25:01,726 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39700, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-07T12:25:01,727 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ce49b8a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T12:25:01,727 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T12:25:01,728 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=27c6fcd7dac8,34755,1733574300909, seqNum=-1] 2024-12-07T12:25:01,728 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T12:25:01,729 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33674, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T12:25:01,730 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=27c6fcd7dac8,45431,1733574300858 2024-12-07T12:25:01,731 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T12:25:01,733 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-07T12:25:01,733 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T12:25:01,735 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/WALs/test.com,8080,1, archiveDir=hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/oldWALs, maxLogs=32 2024-12-07T12:25:01,735 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733574301735 2024-12-07T12:25:01,739 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/WALs/test.com,8080,1/test.com%2C8080%2C1.1733574301735 2024-12-07T12:25:01,740 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37901:37901),(127.0.0.1/127.0.0.1:39999:39999)] 2024-12-07T12:25:01,741 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733574301741 2024-12-07T12:25:01,745 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:25:01,745 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:25:01,745 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:25:01,746 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:25:01,746 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:25:01,746 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/WALs/test.com,8080,1/test.com%2C8080%2C1.1733574301735 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/WALs/test.com,8080,1/test.com%2C8080%2C1.1733574301741 2024-12-07T12:25:01,747 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39999:39999),(127.0.0.1/127.0.0.1:37901:37901)] 2024-12-07T12:25:01,747 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/WALs/test.com,8080,1/test.com%2C8080%2C1.1733574301735 is not closed yet, will try archiving it next time 2024-12-07T12:25:01,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34809 is added to blk_1073741835_1011 (size=93) 2024-12-07T12:25:01,748 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:25:01,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34299 is added to blk_1073741835_1011 (size=93) 2024-12-07T12:25:01,748 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:25:01,748 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:25:01,748 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:25:01,748 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:25:01,748 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/WALs/test.com,8080,1/test.com%2C8080%2C1.1733574301735 to hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/oldWALs/test.com%2C8080%2C1.1733574301735 2024-12-07T12:25:01,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34299 is added to blk_1073741836_1012 (size=93) 2024-12-07T12:25:01,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34809 is added to blk_1073741836_1012 (size=93) 2024-12-07T12:25:01,751 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/oldWALs 2024-12-07T12:25:01,751 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1733574301741) 2024-12-07T12:25:01,751 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-07T12:25:01,752 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-07T12:25:01,752 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T12:25:01,752 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:25:01,752 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:25:01,752 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-07T12:25:01,752 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-07T12:25:01,752 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1871503964, stopped=false 2024-12-07T12:25:01,752 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=27c6fcd7dac8,45431,1733574300858 2024-12-07T12:25:01,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34755-0x1018ce27f3b0001, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T12:25:01,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T12:25:01,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34755-0x1018ce27f3b0001, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:25:01,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:25:01,753 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T12:25:01,754 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-07T12:25:01,754 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T12:25:01,754 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:25:01,754 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '27c6fcd7dac8,34755,1733574300909' ***** 2024-12-07T12:25:01,754 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T12:25:01,754 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:25:01,754 INFO [RS:0;27c6fcd7dac8:34755 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T12:25:01,754 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34755-0x1018ce27f3b0001, quorum=127.0.0.1:60080, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T12:25:01,754 INFO [RS:0;27c6fcd7dac8:34755 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T12:25:01,755 INFO [RS:0;27c6fcd7dac8:34755 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T12:25:01,755 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T12:25:01,755 INFO [RS:0;27c6fcd7dac8:34755 {}] regionserver.HRegionServer(959): stopping server 27c6fcd7dac8,34755,1733574300909 2024-12-07T12:25:01,755 INFO [RS:0;27c6fcd7dac8:34755 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T12:25:01,755 INFO [RS:0;27c6fcd7dac8:34755 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;27c6fcd7dac8:34755. 
2024-12-07T12:25:01,755 DEBUG [RS:0;27c6fcd7dac8:34755 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T12:25:01,755 DEBUG [RS:0;27c6fcd7dac8:34755 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:25:01,755 INFO [RS:0;27c6fcd7dac8:34755 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T12:25:01,755 INFO [RS:0;27c6fcd7dac8:34755 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T12:25:01,755 INFO [RS:0;27c6fcd7dac8:34755 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-07T12:25:01,755 INFO [RS:0;27c6fcd7dac8:34755 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-07T12:25:01,755 INFO [RS:0;27c6fcd7dac8:34755 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-07T12:25:01,755 DEBUG [RS:0;27c6fcd7dac8:34755 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-07T12:25:01,755 DEBUG [RS:0;27c6fcd7dac8:34755 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-07T12:25:01,756 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T12:25:01,756 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T12:25:01,756 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T12:25:01,756 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T12:25:01,756 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T12:25:01,756 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-07T12:25:01,772 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/data/hbase/meta/1588230740/.tmp/ns/412f98046003492ab4c5697a37d5b037 is 43, key is default/ns:d/1733574301701/Put/seqid=0 2024-12-07T12:25:01,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34299 is added to blk_1073741837_1013 (size=5153) 2024-12-07T12:25:01,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34809 is added to blk_1073741837_1013 (size=5153) 2024-12-07T12:25:01,776 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/data/hbase/meta/1588230740/.tmp/ns/412f98046003492ab4c5697a37d5b037 2024-12-07T12:25:01,782 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/data/hbase/meta/1588230740/.tmp/ns/412f98046003492ab4c5697a37d5b037 as hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/data/hbase/meta/1588230740/ns/412f98046003492ab4c5697a37d5b037 2024-12-07T12:25:01,786 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/data/hbase/meta/1588230740/ns/412f98046003492ab4c5697a37d5b037, entries=2, sequenceid=6, filesize=5.0 K 2024-12-07T12:25:01,787 INFO 
[RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 31ms, sequenceid=6, compaction requested=false 2024-12-07T12:25:01,787 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-07T12:25:01,791 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T12:25:01,792 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T12:25:01,792 INFO [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T12:25:01,792 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733574301755Running coprocessor pre-close hooks at 1733574301755Disabling compacts and flushes for region at 1733574301755Disabling writes for close at 1733574301756 (+1 ms)Obtaining lock to block concurrent updates at 1733574301756Preparing flush snapshotting stores in 1588230740 at 1733574301756Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733574301756Flushing stores of hbase:meta,,1.1588230740 at 1733574301757 (+1 ms)Flushing 1588230740/ns: creating writer at 1733574301757Flushing 1588230740/ns: appending metadata at 1733574301771 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733574301771Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5b95b2fe: reopening flushed file at 1733574301781 (+10 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 31ms, sequenceid=6, compaction requested=false at 1733574301787 (+6 ms)Writing region close event to WAL at 1733574301789 (+2 ms)Running coprocessor post-close hooks at 1733574301792 (+3 ms)Closed at 1733574301792 2024-12-07T12:25:01,792 DEBUG [RS_CLOSE_META-regionserver/27c6fcd7dac8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T12:25:01,956 INFO [RS:0;27c6fcd7dac8:34755 {}] regionserver.HRegionServer(976): stopping server 27c6fcd7dac8,34755,1733574300909; all regions closed. 
2024-12-07T12:25:01,956 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:25:01,956 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:25:01,956 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:25:01,956 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:25:01,957 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:25:01,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34809 is added to blk_1073741834_1010 (size=1152) 2024-12-07T12:25:01,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34299 is added to blk_1073741834_1010 (size=1152) 2024-12-07T12:25:01,961 DEBUG [RS:0;27c6fcd7dac8:34755 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/oldWALs 2024-12-07T12:25:01,961 INFO [RS:0;27c6fcd7dac8:34755 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 27c6fcd7dac8%2C34755%2C1733574300909.meta:.meta(num 1733574301640) 2024-12-07T12:25:01,961 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:25:01,961 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:25:01,961 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:25:01,961 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:25:01,961 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T12:25:01,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34809 is added to blk_1073741833_1009 (size=93) 2024-12-07T12:25:01,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34299 is added to blk_1073741833_1009 (size=93) 2024-12-07T12:25:01,965 DEBUG [RS:0;27c6fcd7dac8:34755 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/oldWALs 2024-12-07T12:25:01,965 INFO [RS:0;27c6fcd7dac8:34755 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 27c6fcd7dac8%2C34755%2C1733574300909:(num 1733574301286) 2024-12-07T12:25:01,965 DEBUG [RS:0;27c6fcd7dac8:34755 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T12:25:01,965 INFO [RS:0;27c6fcd7dac8:34755 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T12:25:01,965 INFO [RS:0;27c6fcd7dac8:34755 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T12:25:01,965 INFO [RS:0;27c6fcd7dac8:34755 {}] hbase.ChoreService(370): Chore service for: regionserver/27c6fcd7dac8:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T12:25:01,966 INFO [RS:0;27c6fcd7dac8:34755 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T12:25:01,966 INFO [regionserver/27c6fcd7dac8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T12:25:01,966 INFO [RS:0;27c6fcd7dac8:34755 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34755 2024-12-07T12:25:01,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34755-0x1018ce27f3b0001, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/27c6fcd7dac8,34755,1733574300909 2024-12-07T12:25:01,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T12:25:01,967 INFO [RS:0;27c6fcd7dac8:34755 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T12:25:01,970 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [27c6fcd7dac8,34755,1733574300909] 2024-12-07T12:25:01,971 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/27c6fcd7dac8,34755,1733574300909 already deleted, retry=false 2024-12-07T12:25:01,971 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 27c6fcd7dac8,34755,1733574300909 expired; onlineServers=0 2024-12-07T12:25:01,971 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '27c6fcd7dac8,45431,1733574300858' ***** 2024-12-07T12:25:01,971 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-07T12:25:01,971 INFO [M:0;27c6fcd7dac8:45431 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T12:25:01,971 INFO [M:0;27c6fcd7dac8:45431 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T12:25:01,971 DEBUG [M:0;27c6fcd7dac8:45431 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-07T12:25:01,971 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-07T12:25:01,971 DEBUG [M:0;27c6fcd7dac8:45431 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-07T12:25:01,971 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.large.0-1733574301049 {}] cleaner.HFileCleaner(306): Exit Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.large.0-1733574301049,5,FailOnTimeoutGroup] 2024-12-07T12:25:01,971 DEBUG [master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.small.0-1733574301049 {}] cleaner.HFileCleaner(306): Exit Thread[master/27c6fcd7dac8:0:becomeActiveMaster-HFileCleaner.small.0-1733574301049,5,FailOnTimeoutGroup] 2024-12-07T12:25:01,972 INFO [M:0;27c6fcd7dac8:45431 {}] hbase.ChoreService(370): Chore service for: master/27c6fcd7dac8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-07T12:25:01,972 INFO [M:0;27c6fcd7dac8:45431 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T12:25:01,972 DEBUG [M:0;27c6fcd7dac8:45431 {}] master.HMaster(1795): Stopping service threads 2024-12-07T12:25:01,972 INFO [M:0;27c6fcd7dac8:45431 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-07T12:25:01,972 INFO [M:0;27c6fcd7dac8:45431 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T12:25:01,972 INFO [M:0;27c6fcd7dac8:45431 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-07T12:25:01,972 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-07T12:25:01,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T12:25:01,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T12:25:01,973 DEBUG [M:0;27c6fcd7dac8:45431 {}] zookeeper.ZKUtil(347): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T12:25:01,973 WARN [M:0;27c6fcd7dac8:45431 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T12:25:01,973 INFO [M:0;27c6fcd7dac8:45431 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/.lastflushedseqids 2024-12-07T12:25:01,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34809 is added to blk_1073741838_1014 (size=108) 2024-12-07T12:25:01,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34299 is added to blk_1073741838_1014 (size=108) 2024-12-07T12:25:01,979 INFO [M:0;27c6fcd7dac8:45431 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-07T12:25:01,979 INFO [M:0;27c6fcd7dac8:45431 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-07T12:25:01,979 DEBUG [M:0;27c6fcd7dac8:45431 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T12:25:01,979 INFO [M:0;27c6fcd7dac8:45431 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:25:01,979 DEBUG [M:0;27c6fcd7dac8:45431 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:25:01,979 DEBUG [M:0;27c6fcd7dac8:45431 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T12:25:01,979 DEBUG [M:0;27c6fcd7dac8:45431 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T12:25:01,979 INFO [M:0;27c6fcd7dac8:45431 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-07T12:25:01,994 DEBUG [M:0;27c6fcd7dac8:45431 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/61d3b889cc4547a6bd286c818cfd3d53 is 82, key is hbase:meta,,1/info:regioninfo/1733574301684/Put/seqid=0 2024-12-07T12:25:01,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34299 is added to blk_1073741839_1015 (size=5672) 2024-12-07T12:25:01,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34809 is added to blk_1073741839_1015 (size=5672) 2024-12-07T12:25:01,999 INFO [M:0;27c6fcd7dac8:45431 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/61d3b889cc4547a6bd286c818cfd3d53 2024-12-07T12:25:02,017 DEBUG [M:0;27c6fcd7dac8:45431 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c885186f174b44838dfaa548e1917534 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733574301704/Put/seqid=0 2024-12-07T12:25:02,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34299 is added to blk_1073741840_1016 (size=5275) 2024-12-07T12:25:02,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34809 is added to blk_1073741840_1016 (size=5275) 2024-12-07T12:25:02,022 INFO [M:0;27c6fcd7dac8:45431 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c885186f174b44838dfaa548e1917534 2024-12-07T12:25:02,040 DEBUG [M:0;27c6fcd7dac8:45431 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/53665f9b7e954089968c5726e112abd5 is 69, key is 27c6fcd7dac8,34755,1733574300909/rs:state/1733574301144/Put/seqid=0 2024-12-07T12:25:02,044 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34299 is added to blk_1073741841_1017 (size=5156) 2024-12-07T12:25:02,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34809 is added to blk_1073741841_1017 (size=5156) 2024-12-07T12:25:02,045 INFO [M:0;27c6fcd7dac8:45431 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/53665f9b7e954089968c5726e112abd5 2024-12-07T12:25:02,063 DEBUG [M:0;27c6fcd7dac8:45431 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/741bb3c1104a44608b40381109aae42f is 52, key is load_balancer_on/state:d/1733574301732/Put/seqid=0 2024-12-07T12:25:02,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34299 is added to blk_1073741842_1018 (size=5056) 2024-12-07T12:25:02,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34809 is added to blk_1073741842_1018 (size=5056) 2024-12-07T12:25:02,067 INFO [M:0;27c6fcd7dac8:45431 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/741bb3c1104a44608b40381109aae42f 2024-12-07T12:25:02,070 INFO [RS:0;27c6fcd7dac8:34755 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T12:25:02,070 INFO [RS:0;27c6fcd7dac8:34755 {}] regionserver.HRegionServer(1031): Exiting; stopping=27c6fcd7dac8,34755,1733574300909; zookeeper connection closed. 
2024-12-07T12:25:02,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34755-0x1018ce27f3b0001, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-07T12:25:02,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34755-0x1018ce27f3b0001, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-07T12:25:02,070 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3a3a1538 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3a3a1538
2024-12-07T12:25:02,071 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-07T12:25:02,072 DEBUG [M:0;27c6fcd7dac8:45431 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/61d3b889cc4547a6bd286c818cfd3d53 as hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/61d3b889cc4547a6bd286c818cfd3d53
2024-12-07T12:25:02,077 INFO [M:0;27c6fcd7dac8:45431 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/61d3b889cc4547a6bd286c818cfd3d53, entries=8, sequenceid=29, filesize=5.5 K
2024-12-07T12:25:02,077 DEBUG [M:0;27c6fcd7dac8:45431 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c885186f174b44838dfaa548e1917534 as hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c885186f174b44838dfaa548e1917534
2024-12-07T12:25:02,081 INFO [M:0;27c6fcd7dac8:45431 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c885186f174b44838dfaa548e1917534, entries=3, sequenceid=29, filesize=5.2 K
2024-12-07T12:25:02,082 DEBUG [M:0;27c6fcd7dac8:45431 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/53665f9b7e954089968c5726e112abd5 as hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/53665f9b7e954089968c5726e112abd5
2024-12-07T12:25:02,085 INFO [M:0;27c6fcd7dac8:45431 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/53665f9b7e954089968c5726e112abd5, entries=1, sequenceid=29, filesize=5.0 K
2024-12-07T12:25:02,086 DEBUG [M:0;27c6fcd7dac8:45431 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/741bb3c1104a44608b40381109aae42f as hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/741bb3c1104a44608b40381109aae42f
2024-12-07T12:25:02,089 INFO [M:0;27c6fcd7dac8:45431 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39881/user/jenkins/test-data/55c173f8-d038-6f82-423e-f4fc78dbdc33/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/741bb3c1104a44608b40381109aae42f, entries=1, sequenceid=29, filesize=4.9 K
2024-12-07T12:25:02,090 INFO [M:0;27c6fcd7dac8:45431 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=29, compaction requested=false
2024-12-07T12:25:02,091 INFO [M:0;27c6fcd7dac8:45431 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-07T12:25:02,091 DEBUG [M:0;27c6fcd7dac8:45431 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
Waiting for close lock at 1733574301979
Disabling compacts and flushes for region at 1733574301979
Disabling writes for close at 1733574301979
Obtaining lock to block concurrent updates at 1733574301979
Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733574301979
Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733574301980 (+1 ms)
Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733574301980
Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733574301980
Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733574301994 (+14 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733574301994
Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733574302003 (+9 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733574302017 (+14 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733574302017
Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733574302026 (+9 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733574302039 (+13 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733574302039
Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733574302049 (+10 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733574302062 (+13 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733574302062
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5481aff: reopening flushed file at 1733574302072 (+10 ms)
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2ddd4d82: reopening flushed file at 1733574302077 (+5 ms)
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@d5e3e1c: reopening flushed file at 1733574302081 (+4 ms)
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@64f81f14: reopening flushed file at 1733574302085 (+4 ms)
Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=29, compaction requested=false at 1733574302090 (+5 ms)
Writing region close event to WAL at 1733574302091 (+1 ms)
Closed at 1733574302091
2024-12-07T12:25:02,092 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T12:25:02,092 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T12:25:02,092 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T12:25:02,092 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T12:25:02,093 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T12:25:02,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34299 is added to blk_1073741830_1006 (size=10311)
2024-12-07T12:25:02,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34809 is added to blk_1073741830_1006 (size=10311)
2024-12-07T12:25:02,095 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-07T12:25:02,095 INFO [M:0;27c6fcd7dac8:45431 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-07T12:25:02,095 INFO [M:0;27c6fcd7dac8:45431 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45431
2024-12-07T12:25:02,095 INFO [M:0;27c6fcd7dac8:45431 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-07T12:25:02,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-07T12:25:02,197 INFO [M:0;27c6fcd7dac8:45431 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-07T12:25:02,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45431-0x1018ce27f3b0000, quorum=127.0.0.1:60080, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-07T12:25:02,199 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@627a202d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-07T12:25:02,200 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22ed154c{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T12:25:02,200 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T12:25:02,200 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6bfe0bbd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T12:25:02,200 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49e6dd92{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/hadoop.log.dir/,STOPPED}
2024-12-07T12:25:02,201 WARN [BP-1998150225-172.17.0.2-1733574300190 heartbeating to localhost/127.0.0.1:39881 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-07T12:25:02,201 WARN [BP-1998150225-172.17.0.2-1733574300190 heartbeating to localhost/127.0.0.1:39881 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1998150225-172.17.0.2-1733574300190 (Datanode Uuid b6fdde4f-54c7-4827-81bd-9894087f8d9b) service to localhost/127.0.0.1:39881
2024-12-07T12:25:02,201 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-07T12:25:02,201 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-07T12:25:02,202 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/cluster_cd653442-7da3-75c2-d632-37373fbfed9e/data/data3/current/BP-1998150225-172.17.0.2-1733574300190 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T12:25:02,202 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/cluster_cd653442-7da3-75c2-d632-37373fbfed9e/data/data4/current/BP-1998150225-172.17.0.2-1733574300190 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T12:25:02,202 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-07T12:25:02,204 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@9fc2daa{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-07T12:25:02,204 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2a1af98b{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T12:25:02,204 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T12:25:02,204 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1040cecb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T12:25:02,204 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fcd61c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/hadoop.log.dir/,STOPPED}
2024-12-07T12:25:02,206 WARN [BP-1998150225-172.17.0.2-1733574300190 heartbeating to localhost/127.0.0.1:39881 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-07T12:25:02,206 WARN [BP-1998150225-172.17.0.2-1733574300190 heartbeating to localhost/127.0.0.1:39881 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1998150225-172.17.0.2-1733574300190 (Datanode Uuid 24639138-e2c1-4e12-a22e-7872fd0cba6a) service to localhost/127.0.0.1:39881
2024-12-07T12:25:02,206 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/cluster_cd653442-7da3-75c2-d632-37373fbfed9e/data/data2/current/BP-1998150225-172.17.0.2-1733574300190 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T12:25:02,206 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-07T12:25:02,206 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-07T12:25:02,206 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-07T12:25:02,206 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/cluster_cd653442-7da3-75c2-d632-37373fbfed9e/data/data1/current/BP-1998150225-172.17.0.2-1733574300190 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T12:25:02,212 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@61edd007{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-07T12:25:02,213 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7d664f93{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T12:25:02,213 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T12:25:02,213 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c66b7d1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T12:25:02,213 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b72d363{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3da66ae8-a045-9a56-f8a2-24b30b2841b7/hadoop.log.dir/,STOPPED}
2024-12-07T12:25:02,220 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-07T12:25:02,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-07T12:25:02,243 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=267 (was 227)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39881 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39881 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39881 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:39881 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:39881 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39881 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: globalEventExecutor-1-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39881 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39881 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
- Thread LEAK? -, OpenFileDescriptor=535 (was 506) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=14 (was 14), ProcessCount=11 (was 11), AvailableMemoryMB=6090 (was 6097)