2024-11-21 11:28:13,527 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-11-21 11:28:13,538 main DEBUG Took 0.009405 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-21 11:28:13,538 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-21 11:28:13,538 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-21 11:28:13,539 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-21 11:28:13,540 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-21 11:28:13,547 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-21 11:28:13,558 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 11:28:13,560 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-21 11:28:13,560 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 11:28:13,561 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-21 11:28:13,561 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 11:28:13,561 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-21 11:28:13,562 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 11:28:13,562 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-21 11:28:13,563 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 11:28:13,563 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-21 11:28:13,564 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 11:28:13,564 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-21 11:28:13,564 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 11:28:13,565 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-21 11:28:13,565 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 11:28:13,565 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-21 11:28:13,566 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 11:28:13,566 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-21 11:28:13,566 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 11:28:13,567 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-21 11:28:13,567 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 11:28:13,567 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-21 11:28:13,568 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 11:28:13,568 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-21 11:28:13,568 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 11:28:13,568 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-21 11:28:13,570 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-21 11:28:13,571 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-21 11:28:13,573 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-21 11:28:13,573 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-21 11:28:13,574 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-21 11:28:13,574 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-21 11:28:13,582 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-21 11:28:13,585 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-21 11:28:13,586 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-21 11:28:13,586 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-21 11:28:13,587 main DEBUG createAppenders(={Console})
2024-11-21 11:28:13,587 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized
2024-11-21 11:28:13,588 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-11-21 11:28:13,588 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK.
2024-11-21 11:28:13,589 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-21 11:28:13,589 main DEBUG OutputStream closed
2024-11-21 11:28:13,589 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-21 11:28:13,589 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-21 11:28:13,589 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK
2024-11-21 11:28:13,658 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-21 11:28:13,660 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-21 11:28:13,661 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-21 11:28:13,661 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-21 11:28:13,662 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-21 11:28:13,662 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-21 11:28:13,662 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-21 11:28:13,663 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-21 11:28:13,663 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-21 11:28:13,663 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-21 11:28:13,664 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-21 11:28:13,664 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-21 11:28:13,664 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-21 11:28:13,664 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-21 11:28:13,665 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-21 11:28:13,665 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-21 11:28:13,665 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-21 11:28:13,666 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-21 11:28:13,668 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-21 11:28:13,668 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null
2024-11-21 11:28:13,668 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-21 11:28:13,669 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK.
2024-11-21T11:28:13,897 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418
2024-11-21 11:28:13,900 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-21 11:28:13,901 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-21T11:28:13,910 INFO  [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins
2024-11-21T11:28:13,942 INFO  [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=136, ProcessCount=11, AvailableMemoryMB=5428
2024-11-21T11:28:13,945 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-21T11:28:13,965 INFO  [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/cluster_17cb8813-2406-22a5-3724-0dea327a7953, deleteOnExit=true
2024-11-21T11:28:13,966 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-21T11:28:13,967 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/test.cache.data in system properties and HBase conf
2024-11-21T11:28:13,968 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/hadoop.tmp.dir in system properties and HBase conf
2024-11-21T11:28:13,968 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/hadoop.log.dir in system properties and HBase conf
2024-11-21T11:28:13,969 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-21T11:28:13,970 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-21T11:28:13,970 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-21T11:28:14,064 WARN  [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-11-21T11:28:14,159 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-21T11:28:14,162 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-21T11:28:14,163 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-21T11:28:14,163 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-21T11:28:14,164 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-21T11:28:14,164 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-21T11:28:14,164 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-21T11:28:14,165 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-21T11:28:14,165 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-21T11:28:14,166 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-21T11:28:14,166 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/nfs.dump.dir in system properties and HBase conf
2024-11-21T11:28:14,166 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/java.io.tmpdir in system properties and HBase conf
2024-11-21T11:28:14,167 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-21T11:28:14,167 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-21T11:28:14,168 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-21T11:28:14,693 WARN  [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-21T11:28:15,048 WARN  [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-11-21T11:28:15,133 INFO  [Time-limited test {}] log.Log(170): Logging initialized @2324ms to org.eclipse.jetty.util.log.Slf4jLog
2024-11-21T11:28:15,218 WARN  [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-21T11:28:15,285 INFO  [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-21T11:28:15,306 INFO  [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-21T11:28:15,306 INFO  [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-21T11:28:15,308 INFO  [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-21T11:28:15,322 WARN  [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-21T11:28:15,324 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/hadoop.log.dir/,AVAILABLE}
2024-11-21T11:28:15,326 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-21T11:28:15,576 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/java.io.tmpdir/jetty-localhost-33277-hadoop-hdfs-3_4_1-tests_jar-_-any-6226568837353516331/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-21T11:28:15,584 INFO  [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:33277}
2024-11-21T11:28:15,585 INFO  [Time-limited test {}] server.Server(415): Started @2777ms
2024-11-21T11:28:15,624 WARN  [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-21T11:28:16,010 WARN  [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-21T11:28:16,018 INFO  [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-21T11:28:16,019 INFO  [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-21T11:28:16,019 INFO  [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-21T11:28:16,019 INFO  [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-21T11:28:16,020 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6355b7f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/hadoop.log.dir/,AVAILABLE}
2024-11-21T11:28:16,021 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60d13ec7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-21T11:28:16,144 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6c2fdbac{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/java.io.tmpdir/jetty-localhost-45449-hadoop-hdfs-3_4_1-tests_jar-_-any-9986015372119303537/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-21T11:28:16,145 INFO  [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@461c65fd{HTTP/1.1, (http/1.1)}{localhost:45449}
2024-11-21T11:28:16,145 INFO  [Time-limited test {}] server.Server(415): Started @3337ms
2024-11-21T11:28:16,202 WARN  [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-21T11:28:16,333 WARN  [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-21T11:28:16,340 INFO  [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-21T11:28:16,342 INFO  [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-21T11:28:16,342 INFO  [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-21T11:28:16,342 INFO  [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-21T11:28:16,343 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3369fbc0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/hadoop.log.dir/,AVAILABLE}
2024-11-21T11:28:16,344 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ec7bf2e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-21T11:28:16,473 INFO  [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1467625d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/java.io.tmpdir/jetty-localhost-34773-hadoop-hdfs-3_4_1-tests_jar-_-any-15349590706021208754/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-21T11:28:16,474 INFO  [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@675921ed{HTTP/1.1, (http/1.1)}{localhost:34773}
2024-11-21T11:28:16,474 INFO  [Time-limited test {}] server.Server(415): Started @3666ms
2024-11-21T11:28:16,477 WARN  [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-21T11:28:16,704 WARN  [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/cluster_17cb8813-2406-22a5-3724-0dea327a7953/data/data3/current/BP-1498678389-172.17.0.2-1732188494793/current, will proceed with Du for space computation calculation,
2024-11-21T11:28:16,705 WARN  [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/cluster_17cb8813-2406-22a5-3724-0dea327a7953/data/data4/current/BP-1498678389-172.17.0.2-1732188494793/current, will proceed with Du for space computation calculation,
2024-11-21T11:28:16,705 WARN  [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/cluster_17cb8813-2406-22a5-3724-0dea327a7953/data/data1/current/BP-1498678389-172.17.0.2-1732188494793/current, will proceed with Du for space computation calculation,
2024-11-21T11:28:16,704 WARN  [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/cluster_17cb8813-2406-22a5-3724-0dea327a7953/data/data2/current/BP-1498678389-172.17.0.2-1732188494793/current, will proceed with Du for space computation calculation,
2024-11-21T11:28:16,761 WARN  [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-21T11:28:16,766 WARN  [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-21T11:28:16,846 INFO  [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe50712b2dc0892ef with lease ID 0xe7b2188b2312a047: Processing first storage report for DS-7f757270-1b86-4969-a73d-bcae7d8711bc from datanode DatanodeRegistration(127.0.0.1:36817, datanodeUuid=2d8fa1e3-3d45-4179-b7c7-0935584aeb63, infoPort=39573, infoSecurePort=0, ipcPort=33057, storageInfo=lv=-57;cid=testClusterID;nsid=1320198223;c=1732188494793)
2024-11-21T11:28:16,847 INFO  [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe50712b2dc0892ef with lease ID 0xe7b2188b2312a047: from storage DS-7f757270-1b86-4969-a73d-bcae7d8711bc node DatanodeRegistration(127.0.0.1:36817, datanodeUuid=2d8fa1e3-3d45-4179-b7c7-0935584aeb63, infoPort=39573, infoSecurePort=0, ipcPort=33057, storageInfo=lv=-57;cid=testClusterID;nsid=1320198223;c=1732188494793), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0
2024-11-21T11:28:16,847 INFO  [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1a410619d65fa010 with lease ID 0xe7b2188b2312a048: Processing first storage report for DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665 from datanode DatanodeRegistration(127.0.0.1:41313, datanodeUuid=8d300cb3-ec4c-41c8-ac02-0d9baf152663, infoPort=38791, infoSecurePort=0, ipcPort=41189, storageInfo=lv=-57;cid=testClusterID;nsid=1320198223;c=1732188494793)
2024-11-21T11:28:16,848 INFO  [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1a410619d65fa010 with lease ID 0xe7b2188b2312a048: from storage DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665 node DatanodeRegistration(127.0.0.1:41313, datanodeUuid=8d300cb3-ec4c-41c8-ac02-0d9baf152663, infoPort=38791, infoSecurePort=0, ipcPort=41189, storageInfo=lv=-57;cid=testClusterID;nsid=1320198223;c=1732188494793), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-21T11:28:16,848 INFO  [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe50712b2dc0892ef with lease ID 0xe7b2188b2312a047: Processing first storage report for DS-b394fae7-2a7b-4213-abd3-e32387d48c1c from datanode DatanodeRegistration(127.0.0.1:36817, datanodeUuid=2d8fa1e3-3d45-4179-b7c7-0935584aeb63, infoPort=39573, infoSecurePort=0, ipcPort=33057, storageInfo=lv=-57;cid=testClusterID;nsid=1320198223;c=1732188494793)
2024-11-21T11:28:16,848 INFO  [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe50712b2dc0892ef with lease ID 0xe7b2188b2312a047: from storage DS-b394fae7-2a7b-4213-abd3-e32387d48c1c node DatanodeRegistration(127.0.0.1:36817, datanodeUuid=2d8fa1e3-3d45-4179-b7c7-0935584aeb63, infoPort=39573, infoSecurePort=0, ipcPort=33057, storageInfo=lv=-57;cid=testClusterID;nsid=1320198223;c=1732188494793), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-21T11:28:16,848 INFO  [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1a410619d65fa010 with lease ID 0xe7b2188b2312a048: Processing first storage report for DS-960822e4-c655-47ec-9b32-b86dfd1c87d9 from datanode DatanodeRegistration(127.0.0.1:41313, datanodeUuid=8d300cb3-ec4c-41c8-ac02-0d9baf152663, infoPort=38791, infoSecurePort=0, ipcPort=41189, storageInfo=lv=-57;cid=testClusterID;nsid=1320198223;c=1732188494793)
2024-11-21T11:28:16,848 INFO  [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1a410619d65fa010 with lease ID 0xe7b2188b2312a048: from storage DS-960822e4-c655-47ec-9b32-b86dfd1c87d9 node DatanodeRegistration(127.0.0.1:41313, datanodeUuid=8d300cb3-ec4c-41c8-ac02-0d9baf152663, infoPort=38791, infoSecurePort=0, ipcPort=41189, storageInfo=lv=-57;cid=testClusterID;nsid=1320198223;c=1732188494793), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-21T11:28:16,907 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418
2024-11-21T11:28:16,988 INFO  [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/cluster_17cb8813-2406-22a5-3724-0dea327a7953/zookeeper_0, clientPort=63014, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/cluster_17cb8813-2406-22a5-3724-0dea327a7953/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/cluster_17cb8813-2406-22a5-3724-0dea327a7953/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-21T11:28:16,999 INFO  [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63014
2024-11-21T11:28:17,010 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-21T11:28:17,014 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-21T11:28:17,294 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741825_1001 (size=7)
2024-11-21T11:28:17,295 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741825_1001 (size=7)
2024-11-21T11:28:17,707 INFO  [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa with version=8
2024-11-21T11:28:17,708 INFO  [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/hbase-staging
2024-11-21T11:28:17,820 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-11-21T11:28:18,085 INFO  [Time-limited test {}] client.ConnectionUtils(128): master/7b462513bfc2:0 server-side Connection retries=45
2024-11-21T11:28:18,096 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-21T11:28:18,096 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-21T11:28:18,101 INFO  [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-21T11:28:18,101 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-21T11:28:18,101 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-21T11:28:18,237 INFO  [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-11-21T11:28:18,298 INFO  [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-11-21T11:28:18,307 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-11-21T11:28:18,310 INFO  [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-21T11:28:18,337 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 19974 (auto-detected)
2024-11-21T11:28:18,339 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-11-21T11:28:18,358 INFO  [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36145
2024-11-21T11:28:18,379 INFO  [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36145 connecting to ZooKeeper ensemble=127.0.0.1:63014
2024-11-21T11:28:18,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:361450x0, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-21T11:28:18,419 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36145-0x1013a48ced70000 connected
2024-11-21T11:28:18,452 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-21T11:28:18,456 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-21T11:28:18,471 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-21T11:28:18,476 INFO  [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa, hbase.cluster.distributed=false
2024-11-21T11:28:18,503 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-21T11:28:18,508 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36145
2024-11-21T11:28:18,512 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36145
2024-11-21T11:28:18,513 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36145
2024-11-21T11:28:18,515 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36145
2024-11-21T11:28:18,516 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36145
2024-11-21T11:28:18,626 INFO  [Time-limited test {}] client.ConnectionUtils(128): regionserver/7b462513bfc2:0 server-side Connection retries=45
2024-11-21T11:28:18,628 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-21T11:28:18,628 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-21T11:28:18,629 INFO  [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-21T11:28:18,629 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-21T11:28:18,629 INFO  [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-21T11:28:18,632 INFO  [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-21T11:28:18,634 INFO  [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-21T11:28:18,635 INFO  [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39285
2024-11-21T11:28:18,637 INFO  [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39285 connecting to ZooKeeper ensemble=127.0.0.1:63014
2024-11-21T11:28:18,637 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-21T11:28:18,641 INFO  [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-21T11:28:18,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:392850x0, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-21T11:28:18,649 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39285-0x1013a48ced70001 connected
2024-11-21T11:28:18,649 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39285-0x1013a48ced70001, quorum=127.0.0.1:63014, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-21T11:28:18,653 INFO  [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-21T11:28:18,662 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-21T11:28:18,664 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39285-0x1013a48ced70001, quorum=127.0.0.1:63014, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-21T11:28:18,670 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39285-0x1013a48ced70001, quorum=127.0.0.1:63014, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-21T11:28:18,670 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39285
2024-11-21T11:28:18,671 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39285
2024-11-21T11:28:18,671 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39285
2024-11-21T11:28:18,672 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39285
2024-11-21T11:28:18,672 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39285
2024-11-21T11:28:18,687 DEBUG [M:0;7b462513bfc2:36145 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7b462513bfc2:36145
2024-11-21T11:28:18,688 INFO  [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7b462513bfc2,36145,1732188497881
2024-11-21T11:28:18,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-21T11:28:18,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x1013a48ced70001, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-21T11:28:18,696 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7b462513bfc2,36145,1732188497881
2024-11-21T11:28:18,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x1013a48ced70001, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-21T11:28:18,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-21T11:28:18,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x1013a48ced70001, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-21T11:28:18,719 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-21T11:28:18,720 INFO  [master/7b462513bfc2:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7b462513bfc2,36145,1732188497881 from backup master directory
2024-11-21T11:28:18,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x1013a48ced70001, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-21T11:28:18,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7b462513bfc2,36145,1732188497881
2024-11-21T11:28:18,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-21T11:28:18,723 WARN  [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-21T11:28:18,723 INFO  [master/7b462513bfc2:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7b462513bfc2,36145,1732188497881
2024-11-21T11:28:18,725 INFO  [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-11-21T11:28:18,727 INFO  [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-11-21T11:28:18,792 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/hbase.id] with ID: 7a095482-6436-4178-b1d1-ac23fc70d4bf
2024-11-21T11:28:18,792 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/.tmp/hbase.id
2024-11-21T11:28:18,803 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741826_1002 (size=42)
2024-11-21T11:28:18,803 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741826_1002 (size=42)
2024-11-21T11:28:18,805 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/.tmp/hbase.id]:[hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/hbase.id]
2024-11-21T11:28:18,846 INFO  [master/7b462513bfc2:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-21T11:28:18,850 INFO  [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-21T11:28:18,869 INFO  [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms.
2024-11-21T11:28:18,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-21T11:28:18,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x1013a48ced70001, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-21T11:28:18,892 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741827_1003 (size=196)
2024-11-21T11:28:18,893 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741827_1003 (size=196)
2024-11-21T11:28:18,909 INFO  [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-21T11:28:18,911 INFO  [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-21T11:28:18,918 INFO  [master/7b462513bfc2:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-21T11:28:18,950 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741828_1004 (size=1189)
2024-11-21T11:28:18,950 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741828_1004 (size=1189)
2024-11-21T11:28:18,968 INFO  [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/data/master/store
2024-11-21T11:28:18,987 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741829_1005 (size=34)
2024-11-21T11:28:18,987 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741829_1005 (size=34)
2024-11-21T11:28:18,992 INFO  [master/7b462513bfc2:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-11-21T11:28:18,995 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-21T11:28:18,997 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-21T11:28:18,997 INFO  [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-21T11:28:18,997 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-21T11:28:18,999 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-21T11:28:18,999 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-21T11:28:19,000 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:28:19,001 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732188498997Disabling compacts and flushes for region at 1732188498997Disabling writes for close at 1732188498999 (+2 ms)Writing region close event to WAL at 1732188499000 (+1 ms)Closed at 1732188499000 2024-11-21T11:28:19,003 WARN [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/data/master/store/.initializing 2024-11-21T11:28:19,003 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/WALs/7b462513bfc2,36145,1732188497881 2024-11-21T11:28:19,028 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b462513bfc2%2C36145%2C1732188497881, suffix=, logDir=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/WALs/7b462513bfc2,36145,1732188497881, archiveDir=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/oldWALs, maxLogs=10 2024-11-21T11:28:19,040 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C36145%2C1732188497881.1732188499035 2024-11-21T11:28:19,060 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/WALs/7b462513bfc2,36145,1732188497881/7b462513bfc2%2C36145%2C1732188497881.1732188499035 2024-11-21T11:28:19,068 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38791:38791),(127.0.0.1/127.0.0.1:39573:39573)] 2024-11-21T11:28:19,069 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T11:28:19,070 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:28:19,073 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:28:19,074 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:28:19,111 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:28:19,136 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): 
size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T11:28:19,141 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:28:19,144 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:28:19,145 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:28:19,148 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-21T11:28:19,148 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:28:19,149 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:28:19,150 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:28:19,152 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T11:28:19,152 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:28:19,153 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:28:19,153 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:28:19,156 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T11:28:19,156 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:28:19,157 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:28:19,157 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:28:19,160 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:28:19,161 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:28:19,166 DEBUG [master/7b462513bfc2:0:becomeActiveMaster 
{}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:28:19,166 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:28:19,169 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T11:28:19,172 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:28:19,176 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T11:28:19,177 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=731662, jitterRate=-0.06964367628097534}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T11:28:19,183 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732188499086Initializing all the Stores at 1732188499088 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188499089 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188499089Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188499090 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188499090Cleaning up temporary data from old regions at 1732188499166 (+76 ms)Region opened successfully at 1732188499183 (+17 ms) 2024-11-21T11:28:19,184 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T11:28:19,218 DEBUG 
[master/7b462513bfc2:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cb1742a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7b462513bfc2/172.17.0.2:0 2024-11-21T11:28:19,250 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-21T11:28:19,262 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T11:28:19,262 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T11:28:19,265 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T11:28:19,266 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-21T11:28:19,271 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-21T11:28:19,271 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T11:28:19,296 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-21T11:28:19,304 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-21T11:28:19,307 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-21T11:28:19,310 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T11:28:19,311 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-21T11:28:19,314 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-21T11:28:19,316 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T11:28:19,320 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-21T11:28:19,321 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-21T11:28:19,322 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): 
master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-21T11:28:19,324 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-21T11:28:19,340 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T11:28:19,343 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-21T11:28:19,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-21T11:28:19,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x1013a48ced70001, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-21T11:28:19,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:28:19,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x1013a48ced70001, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:28:19,353 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7b462513bfc2,36145,1732188497881, sessionid=0x1013a48ced70000, setting cluster-up flag (Was=false) 2024-11-21T11:28:19,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:28:19,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x1013a48ced70001, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:28:19,371 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-21T11:28:19,373 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7b462513bfc2,36145,1732188497881 2024-11-21T11:28:19,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:28:19,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x1013a48ced70001, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:28:19,386 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): 
Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-21T11:28:19,387 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7b462513bfc2,36145,1732188497881 2024-11-21T11:28:19,393 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T11:28:19,469 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T11:28:19,476 INFO [RS:0;7b462513bfc2:39285 {}] regionserver.HRegionServer(746): ClusterId : 7a095482-6436-4178-b1d1-ac23fc70d4bf 2024-11-21T11:28:19,479 DEBUG [RS:0;7b462513bfc2:39285 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T11:28:19,479 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T11:28:19,484 DEBUG [RS:0;7b462513bfc2:39285 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T11:28:19,485 DEBUG [RS:0;7b462513bfc2:39285 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T11:28:19,486 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
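Editor's note: the balancer settings dumped above (slop=0.2, maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000) are normally supplied through configuration. A hedged sketch of setting them programmatically follows; the values mirror the log line, but the key names are the commonly used StochasticLoadBalancer keys recalled from memory, not quoted from this log, so treat them as assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class BalancerConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // Values mirror the log above; key names are assumed, not taken from it.
    conf.setFloat("hbase.regions.slop", 0.2f);
    conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
    conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
    conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000);
    return conf;
  }

  private BalancerConfigSketch() {
  }
}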
2024-11-21T11:28:19,489 DEBUG [RS:0;7b462513bfc2:39285 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T11:28:19,489 DEBUG [RS:0;7b462513bfc2:39285 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ab49708, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7b462513bfc2/172.17.0.2:0 2024-11-21T11:28:19,492 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7b462513bfc2,36145,1732188497881 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T11:28:19,499 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:28:19,500 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:28:19,500 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:28:19,500 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:28:19,500 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7b462513bfc2:0, corePoolSize=10, maxPoolSize=10 2024-11-21T11:28:19,501 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:28:19,501 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7b462513bfc2:0, corePoolSize=2, maxPoolSize=2 2024-11-21T11:28:19,501 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:28:19,506 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T11:28:19,506 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T11:28:19,507 DEBUG [RS:0;7b462513bfc2:39285 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7b462513bfc2:39285 2024-11-21T11:28:19,510 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732188529510 2024-11-21T11:28:19,510 INFO [RS:0;7b462513bfc2:39285 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T11:28:19,510 INFO 
[RS:0;7b462513bfc2:39285 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T11:28:19,510 DEBUG [RS:0;7b462513bfc2:39285 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-21T11:28:19,512 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T11:28:19,512 INFO [RS:0;7b462513bfc2:39285 {}] regionserver.HRegionServer(2659): reportForDuty to master=7b462513bfc2,36145,1732188497881 with port=39285, startcode=1732188498588 2024-11-21T11:28:19,513 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T11:28:19,513 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:28:19,513 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T11:28:19,517 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T11:28:19,518 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T11:28:19,518 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T11:28:19,518 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T11:28:19,519 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-21T11:28:19,521 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T11:28:19,523 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T11:28:19,523 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T11:28:19,524 DEBUG [RS:0;7b462513bfc2:39285 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T11:28:19,525 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T11:28:19,526 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T11:28:19,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741831_1007 (size=1321) 2024-11-21T11:28:19,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741831_1007 (size=1321) 2024-11-21T11:28:19,528 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.large.0-1732188499527,5,FailOnTimeoutGroup] 2024-11-21T11:28:19,529 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.small.0-1732188499528,5,FailOnTimeoutGroup] 2024-11-21T11:28:19,529 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T11:28:19,529 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T11:28:19,530 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T11:28:19,530 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T11:28:19,531 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
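Editor's note: the hbase:meta descriptor written to .tabledesc above can also be read back through the client API once the cluster is serving, rather than off HDFS. A minimal sketch assuming a reachable cluster configuration on the classpath; the class and its use here are illustrative, not part of the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;

public final class DescribeMetaSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // expects hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Fetches the same hbase:meta descriptor the master wrote out above.
      TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
      System.out.println(meta);
    }
  }

  private DescribeMetaSketch() {
  }
}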
2024-11-21T11:28:19,531 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa 2024-11-21T11:28:19,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741832_1008 (size=32) 2024-11-21T11:28:19,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741832_1008 (size=32) 2024-11-21T11:28:19,545 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:28:19,548 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T11:28:19,551 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T11:28:19,551 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:28:19,552 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:28:19,552 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T11:28:19,555 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T11:28:19,555 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:28:19,556 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:28:19,557 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T11:28:19,560 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T11:28:19,560 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:28:19,561 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:28:19,561 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T11:28:19,566 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T11:28:19,566 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:28:19,567 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:28:19,568 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T11:28:19,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/hbase/meta/1588230740 2024-11-21T11:28:19,571 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/hbase/meta/1588230740 2024-11-21T11:28:19,575 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T11:28:19,575 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T11:28:19,576 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
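Editor's note: the FlushLargeStoresPolicy fallback logged just below this point occurs because the table descriptor carries no per-column-family flush lower bound. If one wanted to pin that bound explicitly on a table, the key named in the log can be set as a descriptor value; in this sketch the table name and size are made up for illustration.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public final class FlushLowerBoundSketch {
  public static TableDescriptor withLowerBound() {
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_table"))            // hypothetical table
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(16L * 1024 * 1024))                     // 16 MB, matching the fallback value in the log
        .build();
  }

  private FlushLowerBoundSketch() {
  }
}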
2024-11-21T11:28:19,579 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T11:28:19,587 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T11:28:19,588 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=755827, jitterRate=-0.03891672194004059}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-21T11:28:19,590 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732188499546Initializing all the Stores at 1732188499547 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188499547Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188499548 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188499548Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188499548Cleaning up temporary data from old regions at 1732188499575 (+27 ms)Region opened successfully at 1732188499590 (+15 ms) 2024-11-21T11:28:19,590 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T11:28:19,591 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T11:28:19,591 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T11:28:19,591 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T11:28:19,591 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T11:28:19,593 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T11:28:19,593 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732188499590Disabling compacts and flushes for region at 1732188499590Disabling writes for close at 1732188499591 (+1 
ms)Writing region close event to WAL at 1732188499593 (+2 ms)Closed at 1732188499593 2024-11-21T11:28:19,597 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T11:28:19,597 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T11:28:19,606 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52029, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T11:28:19,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T11:28:19,614 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36145 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7b462513bfc2,39285,1732188498588 2024-11-21T11:28:19,617 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36145 {}] master.ServerManager(517): Registering regionserver=7b462513bfc2,39285,1732188498588 2024-11-21T11:28:19,618 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T11:28:19,621 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T11:28:19,635 DEBUG [RS:0;7b462513bfc2:39285 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa 2024-11-21T11:28:19,635 DEBUG [RS:0;7b462513bfc2:39285 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36571 2024-11-21T11:28:19,635 DEBUG [RS:0;7b462513bfc2:39285 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T11:28:19,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-21T11:28:19,640 DEBUG [RS:0;7b462513bfc2:39285 {}] zookeeper.ZKUtil(111): regionserver:39285-0x1013a48ced70001, quorum=127.0.0.1:63014, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7b462513bfc2,39285,1732188498588 2024-11-21T11:28:19,640 WARN [RS:0;7b462513bfc2:39285 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-21T11:28:19,641 INFO [RS:0;7b462513bfc2:39285 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-21T11:28:19,641 DEBUG [RS:0;7b462513bfc2:39285 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588 2024-11-21T11:28:19,643 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7b462513bfc2,39285,1732188498588] 2024-11-21T11:28:19,666 INFO [RS:0;7b462513bfc2:39285 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T11:28:19,679 INFO [RS:0;7b462513bfc2:39285 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T11:28:19,684 INFO [RS:0;7b462513bfc2:39285 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T11:28:19,684 INFO [RS:0;7b462513bfc2:39285 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T11:28:19,685 INFO [RS:0;7b462513bfc2:39285 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T11:28:19,691 INFO [RS:0;7b462513bfc2:39285 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T11:28:19,692 INFO [RS:0;7b462513bfc2:39285 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-21T11:28:19,692 DEBUG [RS:0;7b462513bfc2:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:28:19,693 DEBUG [RS:0;7b462513bfc2:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:28:19,693 DEBUG [RS:0;7b462513bfc2:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:28:19,693 DEBUG [RS:0;7b462513bfc2:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:28:19,693 DEBUG [RS:0;7b462513bfc2:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:28:19,693 DEBUG [RS:0;7b462513bfc2:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7b462513bfc2:0, corePoolSize=2, maxPoolSize=2 2024-11-21T11:28:19,693 DEBUG [RS:0;7b462513bfc2:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:28:19,694 DEBUG [RS:0;7b462513bfc2:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:28:19,694 DEBUG [RS:0;7b462513bfc2:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7b462513bfc2:0, corePoolSize=1, 
maxPoolSize=1 2024-11-21T11:28:19,694 DEBUG [RS:0;7b462513bfc2:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:28:19,694 DEBUG [RS:0;7b462513bfc2:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:28:19,694 DEBUG [RS:0;7b462513bfc2:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:28:19,694 DEBUG [RS:0;7b462513bfc2:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7b462513bfc2:0, corePoolSize=3, maxPoolSize=3 2024-11-21T11:28:19,694 DEBUG [RS:0;7b462513bfc2:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0, corePoolSize=3, maxPoolSize=3 2024-11-21T11:28:19,695 INFO [RS:0;7b462513bfc2:39285 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T11:28:19,696 INFO [RS:0;7b462513bfc2:39285 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T11:28:19,696 INFO [RS:0;7b462513bfc2:39285 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T11:28:19,696 INFO [RS:0;7b462513bfc2:39285 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-21T11:28:19,696 INFO [RS:0;7b462513bfc2:39285 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T11:28:19,696 INFO [RS:0;7b462513bfc2:39285 {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,39285,1732188498588-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T11:28:19,715 INFO [RS:0;7b462513bfc2:39285 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T11:28:19,717 INFO [RS:0;7b462513bfc2:39285 {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,39285,1732188498588-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T11:28:19,717 INFO [RS:0;7b462513bfc2:39285 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:28:19,718 INFO [RS:0;7b462513bfc2:39285 {}] regionserver.Replication(171): 7b462513bfc2,39285,1732188498588 started 2024-11-21T11:28:19,735 INFO [RS:0;7b462513bfc2:39285 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
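Editor's note: the ScheduledChore entries above (CompactionChecker, MemstoreFlusherChore, and so on) are periodic tasks run by a ChoreService. The sketch below shows the general shape of defining and scheduling one; ScheduledChore and ChoreService are internal HBase classes, and the constructor signatures here are recalled from memory and may differ between versions, so treat this purely as an illustration.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public final class ChoreSketch {
  // A stopper the chore can consult; real servers pass themselves in.
  private static final class SimpleStopper implements Stoppable {
    private volatile boolean stopped;
    @Override public void stop(String why) { stopped = true; }
    @Override public boolean isStopped() { return stopped; }
  }

  public static void main(String[] args) {
    SimpleStopper stopper = new SimpleStopper();
    ChoreService service = new ChoreService("demo");
    // Fires every 1000 ms, like the CompactionChecker chore enabled above.
    ScheduledChore chore = new ScheduledChore("DemoChore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("chore tick");
      }
    };
    service.scheduleChore(chore);
    // ... later, on shutdown ...
    service.shutdown();
  }

  private ChoreSketch() {
  }
}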
2024-11-21T11:28:19,735 INFO [RS:0;7b462513bfc2:39285 {}] regionserver.HRegionServer(1482): Serving as 7b462513bfc2,39285,1732188498588, RpcServer on 7b462513bfc2/172.17.0.2:39285, sessionid=0x1013a48ced70001 2024-11-21T11:28:19,736 DEBUG [RS:0;7b462513bfc2:39285 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T11:28:19,736 DEBUG [RS:0;7b462513bfc2:39285 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7b462513bfc2,39285,1732188498588 2024-11-21T11:28:19,736 DEBUG [RS:0;7b462513bfc2:39285 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7b462513bfc2,39285,1732188498588' 2024-11-21T11:28:19,737 DEBUG [RS:0;7b462513bfc2:39285 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-21T11:28:19,738 DEBUG [RS:0;7b462513bfc2:39285 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-21T11:28:19,738 DEBUG [RS:0;7b462513bfc2:39285 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T11:28:19,738 DEBUG [RS:0;7b462513bfc2:39285 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T11:28:19,739 DEBUG [RS:0;7b462513bfc2:39285 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7b462513bfc2,39285,1732188498588 2024-11-21T11:28:19,739 DEBUG [RS:0;7b462513bfc2:39285 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7b462513bfc2,39285,1732188498588' 2024-11-21T11:28:19,739 DEBUG [RS:0;7b462513bfc2:39285 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-21T11:28:19,739 DEBUG [RS:0;7b462513bfc2:39285 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-21T11:28:19,740 DEBUG [RS:0;7b462513bfc2:39285 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T11:28:19,740 INFO [RS:0;7b462513bfc2:39285 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T11:28:19,740 INFO [RS:0;7b462513bfc2:39285 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T11:28:19,772 WARN [7b462513bfc2:36145 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-21T11:28:19,849 INFO [RS:0;7b462513bfc2:39285 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b462513bfc2%2C39285%2C1732188498588, suffix=, logDir=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588, archiveDir=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/oldWALs, maxLogs=32 2024-11-21T11:28:19,851 INFO [RS:0;7b462513bfc2:39285 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C39285%2C1732188498588.1732188499851 2024-11-21T11:28:19,859 INFO [RS:0;7b462513bfc2:39285 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188499851 2024-11-21T11:28:19,861 DEBUG [RS:0;7b462513bfc2:39285 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39573:39573),(127.0.0.1/127.0.0.1:38791:38791)] 2024-11-21T11:28:20,025 DEBUG [7b462513bfc2:36145 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T11:28:20,037 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7b462513bfc2,39285,1732188498588 2024-11-21T11:28:20,044 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7b462513bfc2,39285,1732188498588, state=OPENING 2024-11-21T11:28:20,049 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T11:28:20,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x1013a48ced70001, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:28:20,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:28:20,052 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:28:20,052 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:28:20,053 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T11:28:20,055 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7b462513bfc2,39285,1732188498588}] 2024-11-21T11:28:20,229 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T11:28:20,232 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36583, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T11:28:20,243 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T11:28:20,244 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-21T11:28:20,247 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b462513bfc2%2C39285%2C1732188498588.meta, suffix=.meta, logDir=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588, archiveDir=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/oldWALs, maxLogs=32 2024-11-21T11:28:20,249 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C39285%2C1732188498588.meta.1732188500249.meta 2024-11-21T11:28:20,256 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.meta.1732188500249.meta 2024-11-21T11:28:20,257 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39573:39573),(127.0.0.1/127.0.0.1:38791:38791)] 2024-11-21T11:28:20,258 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T11:28:20,260 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T11:28:20,263 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T11:28:20,268 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-21T11:28:20,272 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T11:28:20,273 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:28:20,274 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T11:28:20,274 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T11:28:20,277 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T11:28:20,278 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T11:28:20,278 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:28:20,279 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:28:20,279 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T11:28:20,281 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T11:28:20,281 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:28:20,282 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:28:20,282 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T11:28:20,283 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T11:28:20,283 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:28:20,284 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:28:20,284 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T11:28:20,285 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T11:28:20,286 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:28:20,286 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-21T11:28:20,287 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T11:28:20,288 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/hbase/meta/1588230740 2024-11-21T11:28:20,290 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/hbase/meta/1588230740 2024-11-21T11:28:20,293 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T11:28:20,293 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T11:28:20,294 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-21T11:28:20,296 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T11:28:20,298 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=817822, jitterRate=0.039914801716804504}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-21T11:28:20,298 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T11:28:20,299 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732188500274Writing region info on filesystem at 1732188500275 (+1 ms)Initializing all the Stores at 1732188500276 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188500276Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188500277 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188500277Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188500277Cleaning up temporary data from old regions at 1732188500293 (+16 ms)Running coprocessor post-open hooks at 1732188500298 (+5 ms)Region opened successfully at 1732188500299 (+1 ms) 2024-11-21T11:28:20,306 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732188500220 2024-11-21T11:28:20,317 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T11:28:20,318 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T11:28:20,319 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7b462513bfc2,39285,1732188498588 2024-11-21T11:28:20,321 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7b462513bfc2,39285,1732188498588, state=OPEN 2024-11-21T11:28:20,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-21T11:28:20,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x1013a48ced70001, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-21T11:28:20,331 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:28:20,331 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:28:20,331 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7b462513bfc2,39285,1732188498588 2024-11-21T11:28:20,336 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T11:28:20,336 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7b462513bfc2,39285,1732188498588 in 276 msec 2024-11-21T11:28:20,342 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T11:28:20,342 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 730 msec 2024-11-21T11:28:20,344 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T11:28:20,344 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T11:28:20,364 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T11:28:20,365 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7b462513bfc2,39285,1732188498588, seqNum=-1] 2024-11-21T11:28:20,386 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T11:28:20,389 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42737, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T11:28:20,416 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 995 msec 2024-11-21T11:28:20,416 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732188500416, completionTime=-1 2024-11-21T11:28:20,419 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T11:28:20,419 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-21T11:28:20,446 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-21T11:28:20,446 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732188560446 2024-11-21T11:28:20,446 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732188620446 2024-11-21T11:28:20,446 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 26 msec 2024-11-21T11:28:20,449 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,36145,1732188497881-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T11:28:20,449 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,36145,1732188497881-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:28:20,450 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,36145,1732188497881-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:28:20,451 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7b462513bfc2:36145, period=300000, unit=MILLISECONDS is enabled. 
2024-11-21T11:28:20,451 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T11:28:20,452 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T11:28:20,458 DEBUG [master/7b462513bfc2:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T11:28:20,478 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.754sec 2024-11-21T11:28:20,479 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T11:28:20,481 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T11:28:20,481 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T11:28:20,482 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-21T11:28:20,482 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T11:28:20,483 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,36145,1732188497881-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T11:28:20,483 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,36145,1732188497881-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T11:28:20,493 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T11:28:20,494 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T11:28:20,495 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,36145,1732188497881-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-21T11:28:20,587 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3653b52f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T11:28:20,591 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-21T11:28:20,591 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-21T11:28:20,595 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7b462513bfc2,36145,-1 for getting cluster id 2024-11-21T11:28:20,598 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T11:28:20,609 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7a095482-6436-4178-b1d1-ac23fc70d4bf' 2024-11-21T11:28:20,613 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T11:28:20,613 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7a095482-6436-4178-b1d1-ac23fc70d4bf" 2024-11-21T11:28:20,613 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62a3f320, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T11:28:20,614 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7b462513bfc2,36145,-1] 2024-11-21T11:28:20,617 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T11:28:20,619 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:28:20,620 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58634, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T11:28:20,624 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@458c2fcc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T11:28:20,624 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T11:28:20,633 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7b462513bfc2,39285,1732188498588, seqNum=-1] 2024-11-21T11:28:20,633 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T11:28:20,635 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50580, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T11:28:20,657 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=7b462513bfc2,36145,1732188497881 2024-11-21T11:28:20,657 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:28:20,665 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-21T11:28:20,669 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T11:28:20,674 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 7b462513bfc2,36145,1732188497881 2024-11-21T11:28:20,676 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5c6fd135 2024-11-21T11:28:20,677 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T11:28:20,680 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60120, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T11:28:20,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36145 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-21T11:28:20,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36145 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-21T11:28:20,685 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36145 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T11:28:20,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36145 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-21T11:28:20,695 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T11:28:20,697 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36145 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-21T11:28:20,697 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:28:20,699 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T11:28:20,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36145 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T11:28:20,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741835_1011 (size=389) 2024-11-21T11:28:20,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741835_1011 (size=389) 2024-11-21T11:28:20,749 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => efa707d7b95c24f6026d83e5c6f7d4c8, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa 2024-11-21T11:28:20,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741836_1012 (size=72) 2024-11-21T11:28:20,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741836_1012 (size=72) 2024-11-21T11:28:20,759 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:28:20,760 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing efa707d7b95c24f6026d83e5c6f7d4c8, disabling compactions & flushes 2024-11-21T11:28:20,760 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8. 2024-11-21T11:28:20,760 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8. 2024-11-21T11:28:20,760 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8. after waiting 0 ms 2024-11-21T11:28:20,760 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8. 2024-11-21T11:28:20,760 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8. 2024-11-21T11:28:20,760 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for efa707d7b95c24f6026d83e5c6f7d4c8: Waiting for close lock at 1732188500760Disabling compacts and flushes for region at 1732188500760Disabling writes for close at 1732188500760Writing region close event to WAL at 1732188500760Closed at 1732188500760 2024-11-21T11:28:20,762 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T11:28:20,767 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732188500762"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732188500762"}]},"ts":"1732188500762"} 2024-11-21T11:28:20,773 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-21T11:28:20,774 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T11:28:20,777 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732188500775"}]},"ts":"1732188500775"} 2024-11-21T11:28:20,782 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-21T11:28:20,784 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=efa707d7b95c24f6026d83e5c6f7d4c8, ASSIGN}] 2024-11-21T11:28:20,786 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=efa707d7b95c24f6026d83e5c6f7d4c8, ASSIGN 2024-11-21T11:28:20,788 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=efa707d7b95c24f6026d83e5c6f7d4c8, ASSIGN; state=OFFLINE, location=7b462513bfc2,39285,1732188498588; forceNewPlan=false, retain=false 2024-11-21T11:28:20,939 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=efa707d7b95c24f6026d83e5c6f7d4c8, regionState=OPENING, regionLocation=7b462513bfc2,39285,1732188498588 2024-11-21T11:28:20,943 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=efa707d7b95c24f6026d83e5c6f7d4c8, ASSIGN because future has completed 2024-11-21T11:28:20,944 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure efa707d7b95c24f6026d83e5c6f7d4c8, server=7b462513bfc2,39285,1732188498588}] 2024-11-21T11:28:21,105 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8. 
2024-11-21T11:28:21,105 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => efa707d7b95c24f6026d83e5c6f7d4c8, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8.', STARTKEY => '', ENDKEY => ''} 2024-11-21T11:28:21,106 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling efa707d7b95c24f6026d83e5c6f7d4c8 2024-11-21T11:28:21,106 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:28:21,106 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for efa707d7b95c24f6026d83e5c6f7d4c8 2024-11-21T11:28:21,106 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for efa707d7b95c24f6026d83e5c6f7d4c8 2024-11-21T11:28:21,109 INFO [StoreOpener-efa707d7b95c24f6026d83e5c6f7d4c8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region efa707d7b95c24f6026d83e5c6f7d4c8 2024-11-21T11:28:21,111 INFO [StoreOpener-efa707d7b95c24f6026d83e5c6f7d4c8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region efa707d7b95c24f6026d83e5c6f7d4c8 columnFamilyName info 2024-11-21T11:28:21,111 DEBUG [StoreOpener-efa707d7b95c24f6026d83e5c6f7d4c8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:28:21,112 INFO [StoreOpener-efa707d7b95c24f6026d83e5c6f7d4c8-1 {}] regionserver.HStore(327): Store=efa707d7b95c24f6026d83e5c6f7d4c8/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:28:21,112 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for efa707d7b95c24f6026d83e5c6f7d4c8 2024-11-21T11:28:21,113 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8 2024-11-21T11:28:21,114 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8 2024-11-21T11:28:21,115 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for efa707d7b95c24f6026d83e5c6f7d4c8 2024-11-21T11:28:21,115 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for efa707d7b95c24f6026d83e5c6f7d4c8 2024-11-21T11:28:21,117 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for efa707d7b95c24f6026d83e5c6f7d4c8 2024-11-21T11:28:21,121 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T11:28:21,122 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened efa707d7b95c24f6026d83e5c6f7d4c8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=796681, jitterRate=0.013032406568527222}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T11:28:21,122 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for efa707d7b95c24f6026d83e5c6f7d4c8 2024-11-21T11:28:21,124 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for efa707d7b95c24f6026d83e5c6f7d4c8: Running coprocessor pre-open hook at 1732188501107Writing region info on filesystem at 1732188501107Initializing all the Stores at 1732188501108 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188501108Cleaning up temporary data from old regions at 1732188501115 (+7 ms)Running coprocessor post-open hooks at 1732188501122 (+7 ms)Region opened successfully at 1732188501124 (+2 ms) 2024-11-21T11:28:21,126 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8., pid=6, masterSystemTime=1732188501098 2024-11-21T11:28:21,130 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8. 2024-11-21T11:28:21,130 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8. 2024-11-21T11:28:21,131 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=efa707d7b95c24f6026d83e5c6f7d4c8, regionState=OPEN, openSeqNum=2, regionLocation=7b462513bfc2,39285,1732188498588 2024-11-21T11:28:21,134 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36145 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=7b462513bfc2,39285,1732188498588, table=TestLogRolling-testSlowSyncLogRolling, region=efa707d7b95c24f6026d83e5c6f7d4c8. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-21T11:28:21,135 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure efa707d7b95c24f6026d83e5c6f7d4c8, server=7b462513bfc2,39285,1732188498588 because future has completed 2024-11-21T11:28:21,141 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T11:28:21,141 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure efa707d7b95c24f6026d83e5c6f7d4c8, server=7b462513bfc2,39285,1732188498588 in 193 msec 2024-11-21T11:28:21,146 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-21T11:28:21,146 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=efa707d7b95c24f6026d83e5c6f7d4c8, ASSIGN in 358 msec 2024-11-21T11:28:21,147 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T11:28:21,147 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732188501147"}]},"ts":"1732188501147"} 2024-11-21T11:28:21,151 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-21T11:28:21,152 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T11:28:21,155 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 464 msec 2024-11-21T11:28:25,778 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-21T11:28:25,841 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-21T11:28:25,843 DEBUG 
[HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-21T11:28:28,294 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-21T11:28:28,294 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-21T11:28:28,296 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-21T11:28:28,296 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-21T11:28:28,297 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T11:28:28,297 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-21T11:28:28,297 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-21T11:28:28,297 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-21T11:28:30,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36145 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T11:28:30,780 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-21T11:28:30,783 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-21T11:28:30,789 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-21T11:28:30,790 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8. 
2024-11-21T11:28:30,791 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C39285%2C1732188498588.1732188510790 2024-11-21T11:28:30,802 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:28:30,802 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:28:30,803 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:28:30,803 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:28:30,803 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:28:30,803 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188499851 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188510790 2024-11-21T11:28:30,805 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38791:38791),(127.0.0.1/127.0.0.1:39573:39573)] 2024-11-21T11:28:30,805 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188499851 is not closed yet, will try archiving it next time 2024-11-21T11:28:30,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741833_1009 (size=451) 2024-11-21T11:28:30,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741833_1009 (size=451) 2024-11-21T11:28:30,808 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188499851 to hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/oldWALs/7b462513bfc2%2C39285%2C1732188498588.1732188499851 2024-11-21T11:28:30,815 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8., hostname=7b462513bfc2,39285,1732188498588, seqNum=2] 2024-11-21T11:28:42,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39285 {}] regionserver.HRegion(8855): Flush requested on efa707d7b95c24f6026d83e5c6f7d4c8 2024-11-21T11:28:42,851 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing efa707d7b95c24f6026d83e5c6f7d4c8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-21T11:28:42,918 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/.tmp/info/086cc6cafc624bf587872238913c1406 is 1080, key is row0001/info:/1732188510818/Put/seqid=0 2024-11-21T11:28:42,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741838_1014 (size=12509) 2024-11-21T11:28:42,933 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741838_1014 (size=12509) 2024-11-21T11:28:43,335 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/.tmp/info/086cc6cafc624bf587872238913c1406 2024-11-21T11:28:43,388 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/.tmp/info/086cc6cafc624bf587872238913c1406 as hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/086cc6cafc624bf587872238913c1406 2024-11-21T11:28:43,399 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/086cc6cafc624bf587872238913c1406, entries=7, sequenceid=11, filesize=12.2 K 2024-11-21T11:28:43,406 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for efa707d7b95c24f6026d83e5c6f7d4c8 in 555ms, sequenceid=11, compaction requested=false 2024-11-21T11:28:43,407 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for efa707d7b95c24f6026d83e5c6f7d4c8: 2024-11-21T11:28:46,904 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
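The flush recorded just above (7 entries, ~1080-byte cells, ~7.36 KB of data for column family 'info') corresponds to a small batch of client puts against rows row0001 onward. Below is a minimal, hedged sketch of writes of that shape using the standard Table API; the ~1 KB payload size and the loop bound of seven are assumptions chosen only to match the logged figures.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutBatchSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
    byte[] family = Bytes.toBytes("info");
    byte[] value = new byte[1024];   // ~1 KB payload per cell, roughly the cell size in the log
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(name)) {
      // Seven ~1 KB puts add up to roughly the 7.36 KB memstore batch flushed above.
      for (int i = 1; i <= 7; i++) {
        Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
        // Empty qualifier, matching the 'row0001/info:/...' keys in the HFile writer entries.
        put.addColumn(family, Bytes.toBytes(""), value);
        table.put(put);
      }
    }
  }
}
```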
2024-11-21T11:28:50,861 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C39285%2C1732188498588.1732188530860 2024-11-21T11:28:51,070 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:28:51,070 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:28:51,070 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:28:51,070 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:28:51,071 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:28:51,071 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:28:51,071 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188510790 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188530860 2024-11-21T11:28:51,072 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38791:38791),(127.0.0.1/127.0.0.1:39573:39573)] 2024-11-21T11:28:51,072 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188510790 is not closed yet, will try archiving it next time 2024-11-21T11:28:51,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741837_1013 (size=12399) 2024-11-21T11:28:51,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741837_1013 (size=12399) 2024-11-21T11:28:51,276 INFO [FSHLog-0-hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa-prefix:7b462513bfc2,39285,1732188498588 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:28:53,480 INFO [FSHLog-0-hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa-prefix:7b462513bfc2,39285,1732188498588 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:28:55,684 INFO [FSHLog-0-hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa-prefix:7b462513bfc2,39285,1732188498588 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:28:57,889 INFO [FSHLog-0-hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa-prefix:7b462513bfc2,39285,1732188498588 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:28:57,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39285 {}] regionserver.HRegion(8855): Flush requested on efa707d7b95c24f6026d83e5c6f7d4c8 2024-11-21T11:28:57,891 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing efa707d7b95c24f6026d83e5c6f7d4c8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-21T11:28:58,093 INFO [FSHLog-0-hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa-prefix:7b462513bfc2,39285,1732188498588 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:28:58,100 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/.tmp/info/e7c2ca1262a94aae9c278d450cd44ba1 is 1080, key is row0008/info:/1732188524850/Put/seqid=0 2024-11-21T11:28:58,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741840_1016 (size=12509) 2024-11-21T11:28:58,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741840_1016 (size=12509) 2024-11-21T11:28:58,111 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/.tmp/info/e7c2ca1262a94aae9c278d450cd44ba1 2024-11-21T11:28:58,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/.tmp/info/e7c2ca1262a94aae9c278d450cd44ba1 as hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/e7c2ca1262a94aae9c278d450cd44ba1 2024-11-21T11:28:58,133 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/e7c2ca1262a94aae9c278d450cd44ba1, entries=7, sequenceid=21, filesize=12.2 K 2024-11-21T11:28:58,335 INFO [FSHLog-0-hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa-prefix:7b462513bfc2,39285,1732188498588 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:28:58,335 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for efa707d7b95c24f6026d83e5c6f7d4c8 in 
445ms, sequenceid=21, compaction requested=false 2024-11-21T11:28:58,336 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for efa707d7b95c24f6026d83e5c6f7d4c8: 2024-11-21T11:28:58,336 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-21T11:28:58,336 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:28:58,337 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/086cc6cafc624bf587872238913c1406 because midkey is the same as first or last row 2024-11-21T11:29:00,094 INFO [FSHLog-0-hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa-prefix:7b462513bfc2,39285,1732188498588 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:29:00,636 INFO [master/7b462513bfc2:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-21T11:29:00,636 INFO [master/7b462513bfc2:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-21T11:29:02,297 INFO [FSHLog-0-hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa-prefix:7b462513bfc2,39285,1732188498588 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:29:02,299 WARN [FSHLog-0-hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa-prefix:7b462513bfc2,39285,1732188498588 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:29:02,300 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7b462513bfc2%2C39285%2C1732188498588:(num 1732188530860) roll requested 2024-11-21T11:29:02,301 INFO [regionserver/7b462513bfc2:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C39285%2C1732188498588.1732188542301 2024-11-21T11:29:02,509 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:29:02,510 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:02,510 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:02,510 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:02,510 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:02,510 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
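The WARN above shows the WAL requesting a roll after the slow-sync count (8) exceeded the configured threshold (5), after which the region server's log roller opens a new WAL file. In this test the roll is triggered automatically, but for illustration only, an operator can also force a WAL roll for a given server through the Admin API; the sketch below is an assumption-laden example, with the server name string taken verbatim from the WAL directory name in the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ForceWalRollSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // host,port,startcode triple as it appears in the WALs path above (illustrative value).
      ServerName rs = ServerName.valueOf("7b462513bfc2,39285,1732188498588");
      admin.rollWALWriter(rs);   // asks that region server to roll its current WAL
    }
  }
}
```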
2024-11-21T11:29:02,510 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188530860 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188542301 2024-11-21T11:29:02,511 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38791:38791),(127.0.0.1/127.0.0.1:39573:39573)] 2024-11-21T11:29:02,511 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188530860 is not closed yet, will try archiving it next time 2024-11-21T11:29:02,511 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188510790 to hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/oldWALs/7b462513bfc2%2C39285%2C1732188498588.1732188510790 2024-11-21T11:29:02,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741839_1015 (size=7739) 2024-11-21T11:29:02,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741839_1015 (size=7739) 2024-11-21T11:29:04,502 INFO [FSHLog-0-hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa-prefix:7b462513bfc2,39285,1732188498588 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:29:06,106 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region efa707d7b95c24f6026d83e5c6f7d4c8, had cached 0 bytes from a total of 25018 2024-11-21T11:29:06,706 INFO [FSHLog-0-hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa-prefix:7b462513bfc2,39285,1732188498588 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:29:08,910 INFO [FSHLog-0-hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa-prefix:7b462513bfc2,39285,1732188498588 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:29:11,115 INFO [FSHLog-0-hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa-prefix:7b462513bfc2,39285,1732188498588 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], 
DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:29:13,117 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T11:29:13,117 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C39285%2C1732188498588.1732188553117 2024-11-21T11:29:16,904 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T11:29:18,126 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:29:18,128 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:29:18,128 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7b462513bfc2%2C39285%2C1732188498588:(num 1732188553117) roll requested 2024-11-21T11:29:18,128 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:18,128 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:18,128 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:18,129 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:18,129 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:18,129 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188542301 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188553117 2024-11-21T11:29:18,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741841_1017 (size=4753) 2024-11-21T11:29:18,132 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38791:38791),(127.0.0.1/127.0.0.1:39573:39573)] 2024-11-21T11:29:18,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741841_1017 (size=4753) 2024-11-21T11:29:18,132 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188542301 is not closed yet, will try archiving it next time 2024-11-21T11:29:18,132 INFO [regionserver/7b462513bfc2:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C39285%2C1732188498588.1732188558132 2024-11-21T11:29:23,135 INFO [FSHLog-0-hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa-prefix:7b462513bfc2,39285,1732188498588 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:29:23,136 WARN [FSHLog-0-hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa-prefix:7b462513bfc2,39285,1732188498588 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:29:23,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39285 {}] regionserver.HRegion(8855): Flush requested on efa707d7b95c24f6026d83e5c6f7d4c8 2024-11-21T11:29:23,136 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing efa707d7b95c24f6026d83e5c6f7d4c8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-21T11:29:23,143 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:29:23,143 WARN [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:29:25,137 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T11:29:28,138 INFO [FSHLog-0-hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa-prefix:7b462513bfc2,39285,1732188498588 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:29:28,138 WARN [FSHLog-0-hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa-prefix:7b462513bfc2,39285,1732188498588 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:29:28,139 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:28,139 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:28,139 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:28,139 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:28,139 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:28,140 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188553117 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188558132 2024-11-21T11:29:28,142 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38791:38791),(127.0.0.1/127.0.0.1:39573:39573)] 2024-11-21T11:29:28,142 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188553117 is not closed yet, will try archiving it next time 2024-11-21T11:29:28,142 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7b462513bfc2%2C39285%2C1732188498588:(num 1732188558132) roll requested 2024-11-21T11:29:28,143 INFO [regionserver/7b462513bfc2:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C39285%2C1732188498588.1732188568142 2024-11-21T11:29:28,144 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/.tmp/info/4070bc39d48c40d2aae773637357c58b is 1080, key is row0015/info:/1732188539892/Put/seqid=0 2024-11-21T11:29:28,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741842_1018 (size=1569) 2024-11-21T11:29:28,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741842_1018 (size=1569) 2024-11-21T11:29:28,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741844_1020 (size=12509) 2024-11-21T11:29:28,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741844_1020 (size=12509) 2024-11-21T11:29:28,154 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/.tmp/info/4070bc39d48c40d2aae773637357c58b 2024-11-21T11:29:28,167 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/.tmp/info/4070bc39d48c40d2aae773637357c58b as hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/4070bc39d48c40d2aae773637357c58b 2024-11-21T11:29:28,179 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/4070bc39d48c40d2aae773637357c58b, entries=7, sequenceid=31, filesize=12.2 K 2024-11-21T11:29:33,153 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:29:33,153 WARN [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:29:33,185 INFO [FSHLog-0-hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa-prefix:7b462513bfc2,39285,1732188498588 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:29:33,185 WARN [FSHLog-0-hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa-prefix:7b462513bfc2,39285,1732188498588 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41313,DS-b25eccad-e2cf-4a6c-8bf2-7df6b27c3665,DISK], DatanodeInfoWithStorage[127.0.0.1:36817,DS-7f757270-1b86-4969-a73d-bcae7d8711bc,DISK]] 2024-11-21T11:29:33,185 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for efa707d7b95c24f6026d83e5c6f7d4c8 in 10049ms, sequenceid=31, compaction requested=true 2024-11-21T11:29:33,185 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:33,185 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for efa707d7b95c24f6026d83e5c6f7d4c8: 2024-11-21T11:29:33,186 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:33,186 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-21T11:29:33,186 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:33,186 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:29:33,186 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:33,186 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/086cc6cafc624bf587872238913c1406 because midkey is the same as first or last row 2024-11-21T11:29:33,186 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:33,186 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188558132 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188568142 2024-11-21T11:29:33,187 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:39573:39573),(127.0.0.1/127.0.0.1:38791:38791)] 2024-11-21T11:29:33,187 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188558132 is not closed yet, will try archiving it next time 2024-11-21T11:29:33,187 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188530860 to hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/oldWALs/7b462513bfc2%2C39285%2C1732188498588.1732188530860 2024-11-21T11:29:33,187 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7b462513bfc2%2C39285%2C1732188498588:(num 1732188573187) roll requested 2024-11-21T11:29:33,188 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store efa707d7b95c24f6026d83e5c6f7d4c8:info, priority=-2147483648, current under compaction store size is 1 2024-11-21T11:29:33,188 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C39285%2C1732188498588.1732188573187 2024-11-21T11:29:33,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741843_1019 (size=438) 2024-11-21T11:29:33,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741843_1019 (size=438) 2024-11-21T11:29:33,190 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T11:29:33,191 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188542301 to hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/oldWALs/7b462513bfc2%2C39285%2C1732188498588.1732188542301 2024-11-21T11:29:33,192 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188553117 to hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/oldWALs/7b462513bfc2%2C39285%2C1732188498588.1732188553117 2024-11-21T11:29:33,192 DEBUG [RS:0;7b462513bfc2:39285-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T11:29:33,193 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188558132 to hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/oldWALs/7b462513bfc2%2C39285%2C1732188498588.1732188558132 2024-11-21T11:29:33,195 DEBUG [RS:0;7b462513bfc2:39285-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations 
with 1 in ratio 2024-11-21T11:29:33,196 DEBUG [RS:0;7b462513bfc2:39285-shortCompactions-0 {}] regionserver.HStore(1541): efa707d7b95c24f6026d83e5c6f7d4c8/info is initiating minor compaction (all files) 2024-11-21T11:29:33,197 INFO [RS:0;7b462513bfc2:39285-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of efa707d7b95c24f6026d83e5c6f7d4c8/info in TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8. 2024-11-21T11:29:33,197 INFO [RS:0;7b462513bfc2:39285-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/086cc6cafc624bf587872238913c1406, hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/e7c2ca1262a94aae9c278d450cd44ba1, hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/4070bc39d48c40d2aae773637357c58b] into tmpdir=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/.tmp, totalSize=36.6 K 2024-11-21T11:29:33,198 DEBUG [RS:0;7b462513bfc2:39285-shortCompactions-0 {}] compactions.Compactor(225): Compacting 086cc6cafc624bf587872238913c1406, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732188510818 2024-11-21T11:29:33,199 DEBUG [RS:0;7b462513bfc2:39285-shortCompactions-0 {}] compactions.Compactor(225): Compacting e7c2ca1262a94aae9c278d450cd44ba1, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732188524850 2024-11-21T11:29:33,199 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:33,200 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:33,200 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:33,200 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:33,200 DEBUG [RS:0;7b462513bfc2:39285-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4070bc39d48c40d2aae773637357c58b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1732188539892 2024-11-21T11:29:33,200 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:33,200 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188568142 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188573187 2024-11-21T11:29:33,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741845_1021 (size=93) 2024-11-21T11:29:33,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741845_1021 (size=93) 2024-11-21T11:29:33,203 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188568142 to hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/oldWALs/7b462513bfc2%2C39285%2C1732188498588.1732188568142 2024-11-21T11:29:33,209 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38791:38791),(127.0.0.1/127.0.0.1:39573:39573)] 2024-11-21T11:29:33,209 INFO [regionserver/7b462513bfc2:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C39285%2C1732188498588.1732188573209 2024-11-21T11:29:33,218 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:33,218 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:33,218 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:33,218 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:33,218 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:33,219 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188573187 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188573209 2024-11-21T11:29:33,220 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38791:38791),(127.0.0.1/127.0.0.1:39573:39573)] 2024-11-21T11:29:33,221 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/WALs/7b462513bfc2,39285,1732188498588/7b462513bfc2%2C39285%2C1732188498588.1732188573187 is not closed yet, will try archiving it next time 2024-11-21T11:29:33,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741846_1022 (size=1258) 2024-11-21T11:29:33,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741846_1022 (size=1258) 2024-11-21T11:29:33,236 INFO [RS:0;7b462513bfc2:39285-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): efa707d7b95c24f6026d83e5c6f7d4c8#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T11:29:33,237 DEBUG [RS:0;7b462513bfc2:39285-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/.tmp/info/fc007f88d464440abf43fdd2728b3f70 is 1080, key is row0001/info:/1732188510818/Put/seqid=0 2024-11-21T11:29:33,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741848_1024 (size=27710) 2024-11-21T11:29:33,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741848_1024 (size=27710) 2024-11-21T11:29:33,267 DEBUG [RS:0;7b462513bfc2:39285-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/.tmp/info/fc007f88d464440abf43fdd2728b3f70 as hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/fc007f88d464440abf43fdd2728b3f70 2024-11-21T11:29:33,291 INFO [RS:0;7b462513bfc2:39285-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in efa707d7b95c24f6026d83e5c6f7d4c8/info of efa707d7b95c24f6026d83e5c6f7d4c8 into fc007f88d464440abf43fdd2728b3f70(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T11:29:33,291 DEBUG [RS:0;7b462513bfc2:39285-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for efa707d7b95c24f6026d83e5c6f7d4c8: 2024-11-21T11:29:33,294 INFO [RS:0;7b462513bfc2:39285-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8., storeName=efa707d7b95c24f6026d83e5c6f7d4c8/info, priority=13, startTime=1732188573187; duration=0sec 2024-11-21T11:29:33,294 DEBUG [RS:0;7b462513bfc2:39285-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-21T11:29:33,294 DEBUG [RS:0;7b462513bfc2:39285-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:29:33,295 DEBUG [RS:0;7b462513bfc2:39285-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/fc007f88d464440abf43fdd2728b3f70 because midkey is the same as first or last row 2024-11-21T11:29:33,295 DEBUG [RS:0;7b462513bfc2:39285-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-21T11:29:33,295 DEBUG [RS:0;7b462513bfc2:39285-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:29:33,295 DEBUG [RS:0;7b462513bfc2:39285-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/fc007f88d464440abf43fdd2728b3f70 because midkey is the same as first or last row 2024-11-21T11:29:33,295 DEBUG [RS:0;7b462513bfc2:39285-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-21T11:29:33,295 DEBUG [RS:0;7b462513bfc2:39285-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:29:33,295 DEBUG [RS:0;7b462513bfc2:39285-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/fc007f88d464440abf43fdd2728b3f70 because midkey is the same as first or last row 2024-11-21T11:29:33,295 DEBUG [RS:0;7b462513bfc2:39285-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T11:29:33,296 DEBUG [RS:0;7b462513bfc2:39285-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: efa707d7b95c24f6026d83e5c6f7d4c8:info 2024-11-21T11:29:45,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39285 {}] regionserver.HRegion(8855): Flush requested on efa707d7b95c24f6026d83e5c6f7d4c8 2024-11-21T11:29:45,236 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing efa707d7b95c24f6026d83e5c6f7d4c8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-21T11:29:45,243 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/.tmp/info/5355c67df05c4fc9aa5461dc288c08ed is 1080, key is row0022/info:/1732188573211/Put/seqid=0 2024-11-21T11:29:45,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741849_1025 (size=12509) 2024-11-21T11:29:45,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741849_1025 (size=12509) 2024-11-21T11:29:45,252 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/.tmp/info/5355c67df05c4fc9aa5461dc288c08ed 2024-11-21T11:29:45,262 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/.tmp/info/5355c67df05c4fc9aa5461dc288c08ed as hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/5355c67df05c4fc9aa5461dc288c08ed 2024-11-21T11:29:45,271 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/5355c67df05c4fc9aa5461dc288c08ed, entries=7, sequenceid=42, filesize=12.2 K 2024-11-21T11:29:45,273 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for efa707d7b95c24f6026d83e5c6f7d4c8 in 36ms, sequenceid=42, compaction requested=false 2024-11-21T11:29:45,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for efa707d7b95c24f6026d83e5c6f7d4c8: 2024-11-21T11:29:45,273 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-21T11:29:45,273 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:29:45,273 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/fc007f88d464440abf43fdd2728b3f70 because midkey is the same as first or last row 2024-11-21T11:29:46,904 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T11:29:51,107 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region efa707d7b95c24f6026d83e5c6f7d4c8, had cached 0 bytes from a total of 40219 2024-11-21T11:29:53,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T11:29:53,250 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T11:29:53,250 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T11:29:53,255 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:29:53,255 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:29:53,256 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
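The call stack above originates in AbstractTestLogRolling.tearDown shutting the mini cluster down through HBaseTestingUtil. A hedged skeleton of that start/stop lifecycle is sketched below; the class and test method names are generic placeholders, and only the HBaseTestingUtil calls mirror what the stack trace shows.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    util.startMiniCluster();      // brings up HDFS, ZooKeeper, a master, and a region server
  }

  @After
  public void tearDown() throws Exception {
    util.shutdownMiniCluster();   // the shutdown path recorded in the stack trace above
  }

  @Test
  public void placeholder() {
    // test body elided; WAL-rolling assertions would go here
  }
}
```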
2024-11-21T11:29:53,256 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T11:29:53,256 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=888866681, stopped=false 2024-11-21T11:29:53,256 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7b462513bfc2,36145,1732188497881 2024-11-21T11:29:53,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-21T11:29:53,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x1013a48ced70001, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-21T11:29:53,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:53,258 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T11:29:53,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x1013a48ced70001, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:53,259 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T11:29:53,259 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T11:29:53,259 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:29:53,259 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39285-0x1013a48ced70001, quorum=127.0.0.1:63014, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:29:53,259 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:29:53,259 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7b462513bfc2,39285,1732188498588' ***** 2024-11-21T11:29:53,259 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T11:29:53,260 INFO [RS:0;7b462513bfc2:39285 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T11:29:53,260 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T11:29:53,260 INFO [RS:0;7b462513bfc2:39285 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T11:29:53,260 INFO [RS:0;7b462513bfc2:39285 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T11:29:53,261 INFO [RS:0;7b462513bfc2:39285 {}] regionserver.HRegionServer(3091): Received CLOSE for efa707d7b95c24f6026d83e5c6f7d4c8 2024-11-21T11:29:53,261 INFO [RS:0;7b462513bfc2:39285 {}] regionserver.HRegionServer(959): stopping server 7b462513bfc2,39285,1732188498588 2024-11-21T11:29:53,261 INFO [RS:0;7b462513bfc2:39285 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T11:29:53,261 INFO [RS:0;7b462513bfc2:39285 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7b462513bfc2:39285. 
2024-11-21T11:29:53,261 DEBUG [RS:0;7b462513bfc2:39285 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T11:29:53,261 DEBUG [RS:0;7b462513bfc2:39285 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:29:53,262 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing efa707d7b95c24f6026d83e5c6f7d4c8, disabling compactions & flushes 2024-11-21T11:29:53,262 INFO [RS:0;7b462513bfc2:39285 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T11:29:53,262 INFO [RS:0;7b462513bfc2:39285 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T11:29:53,262 INFO [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8. 2024-11-21T11:29:53,262 INFO [RS:0;7b462513bfc2:39285 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-21T11:29:53,262 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8. 2024-11-21T11:29:53,262 INFO [RS:0;7b462513bfc2:39285 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T11:29:53,262 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8. after waiting 0 ms 2024-11-21T11:29:53,262 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8. 
2024-11-21T11:29:53,262 INFO [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing efa707d7b95c24f6026d83e5c6f7d4c8 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-21T11:29:53,262 INFO [RS:0;7b462513bfc2:39285 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-21T11:29:53,262 DEBUG [RS:0;7b462513bfc2:39285 {}] regionserver.HRegionServer(1325): Online Regions={efa707d7b95c24f6026d83e5c6f7d4c8=TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8., 1588230740=hbase:meta,,1.1588230740} 2024-11-21T11:29:53,262 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T11:29:53,262 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T11:29:53,263 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T11:29:53,263 DEBUG [RS:0;7b462513bfc2:39285 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, efa707d7b95c24f6026d83e5c6f7d4c8 2024-11-21T11:29:53,263 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T11:29:53,263 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T11:29:53,263 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-21T11:29:53,268 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/.tmp/info/33fc760c8bc14b0a9163fa64799b1b9a is 1080, key is row0029/info:/1732188587238/Put/seqid=0 2024-11-21T11:29:53,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741850_1026 (size=8193) 2024-11-21T11:29:53,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741850_1026 (size=8193) 2024-11-21T11:29:53,278 INFO [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/.tmp/info/33fc760c8bc14b0a9163fa64799b1b9a 2024-11-21T11:29:53,286 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/hbase/meta/1588230740/.tmp/info/c37263ffa033432682c04ef23f0b6730 is 195, key is 
TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8./info:regioninfo/1732188501131/Put/seqid=0 2024-11-21T11:29:53,288 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/.tmp/info/33fc760c8bc14b0a9163fa64799b1b9a as hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/33fc760c8bc14b0a9163fa64799b1b9a 2024-11-21T11:29:53,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741851_1027 (size=7016) 2024-11-21T11:29:53,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741851_1027 (size=7016) 2024-11-21T11:29:53,294 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/hbase/meta/1588230740/.tmp/info/c37263ffa033432682c04ef23f0b6730 2024-11-21T11:29:53,297 INFO [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/33fc760c8bc14b0a9163fa64799b1b9a, entries=3, sequenceid=48, filesize=8.0 K 2024-11-21T11:29:53,299 INFO [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for efa707d7b95c24f6026d83e5c6f7d4c8 in 37ms, sequenceid=48, compaction requested=true 2024-11-21T11:29:53,299 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/086cc6cafc624bf587872238913c1406, hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/e7c2ca1262a94aae9c278d450cd44ba1, hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/4070bc39d48c40d2aae773637357c58b] to archive 2024-11-21T11:29:53,303 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-21T11:29:53,308 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/086cc6cafc624bf587872238913c1406 to hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/archive/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/086cc6cafc624bf587872238913c1406 2024-11-21T11:29:53,310 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/e7c2ca1262a94aae9c278d450cd44ba1 to hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/archive/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/e7c2ca1262a94aae9c278d450cd44ba1 2024-11-21T11:29:53,312 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/4070bc39d48c40d2aae773637357c58b to hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/archive/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/info/4070bc39d48c40d2aae773637357c58b 2024-11-21T11:29:53,320 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/hbase/meta/1588230740/.tmp/ns/fdd467a4e0194b9f9105f186ae3300b8 is 43, key is default/ns:d/1732188500393/Put/seqid=0 2024-11-21T11:29:53,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741852_1028 (size=5153) 2024-11-21T11:29:53,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741852_1028 (size=5153) 2024-11-21T11:29:53,330 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/hbase/meta/1588230740/.tmp/ns/fdd467a4e0194b9f9105f186ae3300b8 2024-11-21T11:29:53,326 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=7b462513bfc2:36145 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-21T11:29:53,331 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [086cc6cafc624bf587872238913c1406=12509, e7c2ca1262a94aae9c278d450cd44ba1=12509, 4070bc39d48c40d2aae773637357c58b=12509] 2024-11-21T11:29:53,338 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/default/TestLogRolling-testSlowSyncLogRolling/efa707d7b95c24f6026d83e5c6f7d4c8/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-21T11:29:53,341 INFO [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8. 2024-11-21T11:29:53,342 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for efa707d7b95c24f6026d83e5c6f7d4c8: Waiting for close lock at 1732188593261Running coprocessor pre-close hooks at 1732188593261Disabling compacts and flushes for region at 1732188593261Disabling writes for close at 1732188593262 (+1 ms)Obtaining lock to block concurrent updates at 1732188593262Preparing flush snapshotting stores in efa707d7b95c24f6026d83e5c6f7d4c8 at 1732188593262Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732188593262Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8. at 1732188593264 (+2 ms)Flushing efa707d7b95c24f6026d83e5c6f7d4c8/info: creating writer at 1732188593264Flushing efa707d7b95c24f6026d83e5c6f7d4c8/info: appending metadata at 1732188593268 (+4 ms)Flushing efa707d7b95c24f6026d83e5c6f7d4c8/info: closing flushed file at 1732188593268Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@44605df0: reopening flushed file at 1732188593287 (+19 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for efa707d7b95c24f6026d83e5c6f7d4c8 in 37ms, sequenceid=48, compaction requested=true at 1732188593299 (+12 ms)Writing region close event to WAL at 1732188593332 (+33 ms)Running coprocessor post-close hooks at 1732188593339 (+7 ms)Closed at 1732188593341 (+2 ms) 2024-11-21T11:29:53,342 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1732188500681.efa707d7b95c24f6026d83e5c6f7d4c8. 
2024-11-21T11:29:53,364 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/hbase/meta/1588230740/.tmp/table/aa53650285614aa9a57810d0964b82cd is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1732188501147/Put/seqid=0 2024-11-21T11:29:53,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741853_1029 (size=5396) 2024-11-21T11:29:53,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741853_1029 (size=5396) 2024-11-21T11:29:53,371 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/hbase/meta/1588230740/.tmp/table/aa53650285614aa9a57810d0964b82cd 2024-11-21T11:29:53,381 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/hbase/meta/1588230740/.tmp/info/c37263ffa033432682c04ef23f0b6730 as hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/hbase/meta/1588230740/info/c37263ffa033432682c04ef23f0b6730 2024-11-21T11:29:53,391 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/hbase/meta/1588230740/info/c37263ffa033432682c04ef23f0b6730, entries=10, sequenceid=11, filesize=6.9 K 2024-11-21T11:29:53,393 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/hbase/meta/1588230740/.tmp/ns/fdd467a4e0194b9f9105f186ae3300b8 as hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/hbase/meta/1588230740/ns/fdd467a4e0194b9f9105f186ae3300b8 2024-11-21T11:29:53,402 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/hbase/meta/1588230740/ns/fdd467a4e0194b9f9105f186ae3300b8, entries=2, sequenceid=11, filesize=5.0 K 2024-11-21T11:29:53,404 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/hbase/meta/1588230740/.tmp/table/aa53650285614aa9a57810d0964b82cd as hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/hbase/meta/1588230740/table/aa53650285614aa9a57810d0964b82cd 2024-11-21T11:29:53,413 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/hbase/meta/1588230740/table/aa53650285614aa9a57810d0964b82cd, entries=2, sequenceid=11, filesize=5.3 K 2024-11-21T11:29:53,415 INFO 
[RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 151ms, sequenceid=11, compaction requested=false 2024-11-21T11:29:53,421 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-21T11:29:53,422 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T11:29:53,422 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T11:29:53,422 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732188593262Running coprocessor pre-close hooks at 1732188593262Disabling compacts and flushes for region at 1732188593262Disabling writes for close at 1732188593263 (+1 ms)Obtaining lock to block concurrent updates at 1732188593263Preparing flush snapshotting stores in 1588230740 at 1732188593263Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1732188593263Flushing stores of hbase:meta,,1.1588230740 at 1732188593264 (+1 ms)Flushing 1588230740/info: creating writer at 1732188593264Flushing 1588230740/info: appending metadata at 1732188593285 (+21 ms)Flushing 1588230740/info: closing flushed file at 1732188593285Flushing 1588230740/ns: creating writer at 1732188593303 (+18 ms)Flushing 1588230740/ns: appending metadata at 1732188593320 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1732188593320Flushing 1588230740/table: creating writer at 1732188593341 (+21 ms)Flushing 1588230740/table: appending metadata at 1732188593363 (+22 ms)Flushing 1588230740/table: closing flushed file at 1732188593363Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b43a964: reopening flushed file at 1732188593379 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32e5c4cd: reopening flushed file at 1732188593391 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@622d6704: reopening flushed file at 1732188593403 (+12 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 151ms, sequenceid=11, compaction requested=false at 1732188593415 (+12 ms)Writing region close event to WAL at 1732188593416 (+1 ms)Running coprocessor post-close hooks at 1732188593422 (+6 ms)Closed at 1732188593422 2024-11-21T11:29:53,423 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T11:29:53,463 INFO [RS:0;7b462513bfc2:39285 {}] regionserver.HRegionServer(976): stopping server 7b462513bfc2,39285,1732188498588; all regions closed. 
2024-11-21T11:29:53,465 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:53,465 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:53,465 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:53,465 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:53,465 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:53,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741834_1010 (size=3066) 2024-11-21T11:29:53,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741834_1010 (size=3066) 2024-11-21T11:29:53,473 DEBUG [RS:0;7b462513bfc2:39285 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/oldWALs 2024-11-21T11:29:53,473 INFO [RS:0;7b462513bfc2:39285 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7b462513bfc2%2C39285%2C1732188498588.meta:.meta(num 1732188500249) 2024-11-21T11:29:53,473 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:53,473 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:53,473 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:53,474 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:53,474 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:53,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741847_1023 (size=12695) 2024-11-21T11:29:53,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741847_1023 (size=12695) 2024-11-21T11:29:53,480 DEBUG [RS:0;7b462513bfc2:39285 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/oldWALs 2024-11-21T11:29:53,480 INFO [RS:0;7b462513bfc2:39285 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7b462513bfc2%2C39285%2C1732188498588:(num 1732188573209) 2024-11-21T11:29:53,480 DEBUG [RS:0;7b462513bfc2:39285 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:29:53,481 INFO [RS:0;7b462513bfc2:39285 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T11:29:53,481 INFO [RS:0;7b462513bfc2:39285 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T11:29:53,481 INFO [RS:0;7b462513bfc2:39285 {}] hbase.ChoreService(370): Chore service for: regionserver/7b462513bfc2:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-21T11:29:53,481 INFO [RS:0;7b462513bfc2:39285 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T11:29:53,481 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-21T11:29:53,482 INFO [RS:0;7b462513bfc2:39285 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39285 2024-11-21T11:29:53,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x1013a48ced70001, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7b462513bfc2,39285,1732188498588 2024-11-21T11:29:53,486 INFO [RS:0;7b462513bfc2:39285 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T11:29:53,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-21T11:29:53,490 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7b462513bfc2,39285,1732188498588] 2024-11-21T11:29:53,492 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7b462513bfc2,39285,1732188498588 already deleted, retry=false 2024-11-21T11:29:53,492 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7b462513bfc2,39285,1732188498588 expired; onlineServers=0 2024-11-21T11:29:53,492 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7b462513bfc2,36145,1732188497881' ***** 2024-11-21T11:29:53,492 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T11:29:53,492 INFO [M:0;7b462513bfc2:36145 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T11:29:53,493 INFO [M:0;7b462513bfc2:36145 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T11:29:53,493 DEBUG [M:0;7b462513bfc2:36145 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T11:29:53,493 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-21T11:29:53,493 DEBUG [M:0;7b462513bfc2:36145 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T11:29:53,493 DEBUG [master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.large.0-1732188499527 {}] cleaner.HFileCleaner(306): Exit Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.large.0-1732188499527,5,FailOnTimeoutGroup] 2024-11-21T11:29:53,493 DEBUG [master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.small.0-1732188499528 {}] cleaner.HFileCleaner(306): Exit Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.small.0-1732188499528,5,FailOnTimeoutGroup] 2024-11-21T11:29:53,493 INFO [M:0;7b462513bfc2:36145 {}] hbase.ChoreService(370): Chore service for: master/7b462513bfc2:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T11:29:53,493 INFO [M:0;7b462513bfc2:36145 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T11:29:53,493 DEBUG [M:0;7b462513bfc2:36145 {}] master.HMaster(1795): Stopping service threads 2024-11-21T11:29:53,494 INFO [M:0;7b462513bfc2:36145 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T11:29:53,494 INFO [M:0;7b462513bfc2:36145 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T11:29:53,494 INFO [M:0;7b462513bfc2:36145 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T11:29:53,494 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-21T11:29:53,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-21T11:29:53,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:53,495 DEBUG [M:0;7b462513bfc2:36145 {}] zookeeper.ZKUtil(347): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-21T11:29:53,495 WARN [M:0;7b462513bfc2:36145 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-21T11:29:53,496 INFO [M:0;7b462513bfc2:36145 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/.lastflushedseqids 2024-11-21T11:29:53,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741854_1030 (size=130) 2024-11-21T11:29:53,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741854_1030 (size=130) 2024-11-21T11:29:53,510 INFO [M:0;7b462513bfc2:36145 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T11:29:53,510 INFO [M:0;7b462513bfc2:36145 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T11:29:53,510 DEBUG [M:0;7b462513bfc2:36145 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T11:29:53,510 INFO [M:0;7b462513bfc2:36145 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:29:53,510 DEBUG [M:0;7b462513bfc2:36145 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:29:53,510 DEBUG [M:0;7b462513bfc2:36145 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T11:29:53,510 DEBUG [M:0;7b462513bfc2:36145 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:29:53,510 INFO [M:0;7b462513bfc2:36145 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-21T11:29:53,529 DEBUG [M:0;7b462513bfc2:36145 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/38f6437fabb14e4382fa02873e8cf08d is 82, key is hbase:meta,,1/info:regioninfo/1732188500319/Put/seqid=0 2024-11-21T11:29:53,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741855_1031 (size=5672) 2024-11-21T11:29:53,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741855_1031 (size=5672) 2024-11-21T11:29:53,536 INFO [M:0;7b462513bfc2:36145 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/38f6437fabb14e4382fa02873e8cf08d 2024-11-21T11:29:53,562 DEBUG [M:0;7b462513bfc2:36145 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/83669daa1fd34166a29d81f8a54dcd08 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732188501154/Put/seqid=0 2024-11-21T11:29:53,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741856_1032 (size=6247) 2024-11-21T11:29:53,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741856_1032 (size=6247) 2024-11-21T11:29:53,569 INFO [M:0;7b462513bfc2:36145 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/83669daa1fd34166a29d81f8a54dcd08 2024-11-21T11:29:53,577 INFO [M:0;7b462513bfc2:36145 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 83669daa1fd34166a29d81f8a54dcd08 2024-11-21T11:29:53,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x1013a48ced70001, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:29:53,590 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x1013a48ced70001, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:29:53,590 INFO [RS:0;7b462513bfc2:39285 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T11:29:53,590 INFO [RS:0;7b462513bfc2:39285 {}] regionserver.HRegionServer(1031): Exiting; stopping=7b462513bfc2,39285,1732188498588; zookeeper connection closed. 2024-11-21T11:29:53,591 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@473378e1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@473378e1 2024-11-21T11:29:53,591 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T11:29:53,601 DEBUG [M:0;7b462513bfc2:36145 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/adf783eda2f04a15b1034f8fea554854 is 69, key is 7b462513bfc2,39285,1732188498588/rs:state/1732188499620/Put/seqid=0 2024-11-21T11:29:53,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741857_1033 (size=5156) 2024-11-21T11:29:53,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741857_1033 (size=5156) 2024-11-21T11:29:53,608 INFO [M:0;7b462513bfc2:36145 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/adf783eda2f04a15b1034f8fea554854 2024-11-21T11:29:53,633 DEBUG [M:0;7b462513bfc2:36145 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/106e3c13b717412ebd106989751341b2 is 52, key is load_balancer_on/state:d/1732188500661/Put/seqid=0 2024-11-21T11:29:53,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741858_1034 (size=5056) 2024-11-21T11:29:53,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741858_1034 (size=5056) 2024-11-21T11:29:53,641 INFO [M:0;7b462513bfc2:36145 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/106e3c13b717412ebd106989751341b2 2024-11-21T11:29:53,650 DEBUG [M:0;7b462513bfc2:36145 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/38f6437fabb14e4382fa02873e8cf08d as hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/38f6437fabb14e4382fa02873e8cf08d 2024-11-21T11:29:53,656 INFO [M:0;7b462513bfc2:36145 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/38f6437fabb14e4382fa02873e8cf08d, entries=8, sequenceid=59, filesize=5.5 K 2024-11-21T11:29:53,657 DEBUG [M:0;7b462513bfc2:36145 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/83669daa1fd34166a29d81f8a54dcd08 as hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/83669daa1fd34166a29d81f8a54dcd08 2024-11-21T11:29:53,664 INFO [M:0;7b462513bfc2:36145 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 83669daa1fd34166a29d81f8a54dcd08 2024-11-21T11:29:53,664 INFO [M:0;7b462513bfc2:36145 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/83669daa1fd34166a29d81f8a54dcd08, entries=6, sequenceid=59, filesize=6.1 K 2024-11-21T11:29:53,665 DEBUG [M:0;7b462513bfc2:36145 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/adf783eda2f04a15b1034f8fea554854 as hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/adf783eda2f04a15b1034f8fea554854 2024-11-21T11:29:53,672 INFO [M:0;7b462513bfc2:36145 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/adf783eda2f04a15b1034f8fea554854, entries=1, sequenceid=59, filesize=5.0 K 2024-11-21T11:29:53,673 DEBUG [M:0;7b462513bfc2:36145 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/106e3c13b717412ebd106989751341b2 as hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/106e3c13b717412ebd106989751341b2 2024-11-21T11:29:53,682 INFO [M:0;7b462513bfc2:36145 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/106e3c13b717412ebd106989751341b2, entries=1, sequenceid=59, filesize=4.9 K 2024-11-21T11:29:53,684 INFO [M:0;7b462513bfc2:36145 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 174ms, sequenceid=59, compaction requested=false 2024-11-21T11:29:53,686 INFO [M:0;7b462513bfc2:36145 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-21T11:29:53,686 DEBUG [M:0;7b462513bfc2:36145 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732188593510Disabling compacts and flushes for region at 1732188593510Disabling writes for close at 1732188593510Obtaining lock to block concurrent updates at 1732188593510Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732188593510Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1732188593511 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732188593512 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732188593512Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732188593528 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732188593528Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732188593543 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732188593561 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732188593561Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732188593577 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732188593600 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732188593600Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732188593616 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732188593632 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732188593632Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3609ff7c: reopening flushed file at 1732188593649 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@61795b8c: reopening flushed file at 1732188593656 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@131a11c6: reopening flushed file at 1732188593664 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ff74931: reopening flushed file at 1732188593672 (+8 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 174ms, sequenceid=59, compaction requested=false at 1732188593684 (+12 ms)Writing region close event to WAL at 1732188593686 (+2 ms)Closed at 1732188593686 2024-11-21T11:29:53,687 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:53,687 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:53,688 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:53,688 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:53,688 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:53,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36817 is added to blk_1073741830_1006 (size=27973) 2024-11-21T11:29:53,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41313 is added to blk_1073741830_1006 (size=27973) 2024-11-21T11:29:53,692 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-21T11:29:53,692 INFO [M:0;7b462513bfc2:36145 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-21T11:29:53,692 INFO [M:0;7b462513bfc2:36145 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36145 2024-11-21T11:29:53,693 INFO [M:0;7b462513bfc2:36145 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T11:29:53,700 INFO [regionserver/7b462513bfc2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T11:29:53,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:29:53,795 INFO [M:0;7b462513bfc2:36145 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T11:29:53,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36145-0x1013a48ced70000, quorum=127.0.0.1:63014, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:29:53,800 WARN [BP-1498678389-172.17.0.2-1732188494793 heartbeating to localhost/127.0.0.1:36571 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1498678389-172.17.0.2-1732188494793 (Datanode Uuid 8d300cb3-ec4c-41c8-ac02-0d9baf152663) service to localhost/127.0.0.1:36571 2024-11-21T11:29:53,801 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1467625d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:29:53,802 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/cluster_17cb8813-2406-22a5-3724-0dea327a7953/data/data3/current/BP-1498678389-172.17.0.2-1732188494793 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:29:53,802 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/cluster_17cb8813-2406-22a5-3724-0dea327a7953/data/data4/current/BP-1498678389-172.17.0.2-1732188494793 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:29:53,805 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@675921ed{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:29:53,805 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:29:53,805 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ec7bf2e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:29:53,805 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3369fbc0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/hadoop.log.dir/,STOPPED} 2024-11-21T11:29:53,809 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T11:29:53,813 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.w.WebAppContext@6c2fdbac{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:29:53,813 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@461c65fd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:29:53,813 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:29:53,814 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60d13ec7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:29:53,814 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6355b7f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/hadoop.log.dir/,STOPPED} 2024-11-21T11:29:53,816 WARN [BP-1498678389-172.17.0.2-1732188494793 heartbeating to localhost/127.0.0.1:36571 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T11:29:53,816 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-21T11:29:53,816 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T11:29:53,816 WARN [BP-1498678389-172.17.0.2-1732188494793 heartbeating to localhost/127.0.0.1:36571 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1498678389-172.17.0.2-1732188494793 (Datanode Uuid 2d8fa1e3-3d45-4179-b7c7-0935584aeb63) service to localhost/127.0.0.1:36571 2024-11-21T11:29:53,817 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/cluster_17cb8813-2406-22a5-3724-0dea327a7953/data/data1/current/BP-1498678389-172.17.0.2-1732188494793 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:29:53,817 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/cluster_17cb8813-2406-22a5-3724-0dea327a7953/data/data2/current/BP-1498678389-172.17.0.2-1732188494793 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:29:53,817 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T11:29:53,831 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T11:29:53,833 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:29:53,833 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:29:53,833 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:29:53,833 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/hadoop.log.dir/,STOPPED} 2024-11-21T11:29:53,842 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-21T11:29:53,872 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-21T11:29:53,884 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=78 (was 12) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36571 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36571 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:36571 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: master/7b462513bfc2:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: master/7b462513bfc2:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Command processor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:36571 from jenkins 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/7b462513bfc2:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@efc635d java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36571 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:36571 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36571 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:36571 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=404 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=200 (was 136) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=5103 (was 5428) 2024-11-21T11:29:53,892 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=79, OpenFileDescriptor=404, MaxFileDescriptor=1048576, SystemLoadAverage=200, ProcessCount=11, AvailableMemoryMB=5101 2024-11-21T11:29:53,893 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T11:29:53,893 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/hadoop.log.dir so I do NOT create it in target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10 2024-11-21T11:29:53,893 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/939c257c-8def-a3ae-143f-b5143c822418/hadoop.tmp.dir so I do NOT create it in target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10 2024-11-21T11:29:53,893 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/cluster_a9dd033b-a5fa-b6fb-cbc3-e91ffa8478c8, deleteOnExit=true 2024-11-21T11:29:53,893 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T11:29:53,894 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/test.cache.data in system properties and HBase conf 2024-11-21T11:29:53,894 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T11:29:53,894 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/hadoop.log.dir in system properties and HBase conf 2024-11-21T11:29:53,894 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T11:29:53,894 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-21T11:29:53,894 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T11:29:53,894 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-21T11:29:53,894 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T11:29:53,894 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T11:29:53,895 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T11:29:53,895 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T11:29:53,895 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T11:29:53,895 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T11:29:53,895 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T11:29:53,895 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T11:29:53,895 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T11:29:53,895 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/nfs.dump.dir in system properties and HBase conf 2024-11-21T11:29:53,895 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/java.io.tmpdir in system properties and HBase conf 2024-11-21T11:29:53,895 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T11:29:53,895 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T11:29:53,895 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T11:29:53,910 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-21T11:29:53,993 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:29:53,999 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T11:29:54,000 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T11:29:54,000 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T11:29:54,000 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T11:29:54,001 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:29:54,001 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f681677{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/hadoop.log.dir/,AVAILABLE} 2024-11-21T11:29:54,002 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3197ca45{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T11:29:54,121 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@49a88a00{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/java.io.tmpdir/jetty-localhost-42747-hadoop-hdfs-3_4_1-tests_jar-_-any-9004493963606541897/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T11:29:54,122 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4e4256d4{HTTP/1.1, (http/1.1)}{localhost:42747} 2024-11-21T11:29:54,122 INFO [Time-limited test {}] server.Server(415): Started @101314ms 2024-11-21T11:29:54,136 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-21T11:29:54,214 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:29:54,219 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T11:29:54,220 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T11:29:54,220 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T11:29:54,220 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T11:29:54,221 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@eab7acc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/hadoop.log.dir/,AVAILABLE} 2024-11-21T11:29:54,221 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4edee9ab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T11:29:54,344 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@542ee468{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/java.io.tmpdir/jetty-localhost-44345-hadoop-hdfs-3_4_1-tests_jar-_-any-6360122172214713516/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:29:54,344 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@75c88313{HTTP/1.1, (http/1.1)}{localhost:44345} 2024-11-21T11:29:54,344 INFO [Time-limited test {}] server.Server(415): Started @101536ms 2024-11-21T11:29:54,346 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T11:29:54,394 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:29:54,400 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T11:29:54,404 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T11:29:54,405 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T11:29:54,405 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T11:29:54,405 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c1be80f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/hadoop.log.dir/,AVAILABLE} 2024-11-21T11:29:54,406 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b44e274{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T11:29:54,462 WARN [Thread-439 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/cluster_a9dd033b-a5fa-b6fb-cbc3-e91ffa8478c8/data/data1/current/BP-879865881-172.17.0.2-1732188593930/current, will proceed with Du for space computation calculation, 2024-11-21T11:29:54,462 WARN [Thread-440 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/cluster_a9dd033b-a5fa-b6fb-cbc3-e91ffa8478c8/data/data2/current/BP-879865881-172.17.0.2-1732188593930/current, will proceed with Du for space computation calculation, 2024-11-21T11:29:54,486 WARN [Thread-418 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-21T11:29:54,489 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xed73446b60c11889 with lease ID 0x634e5b015af002b4: Processing first storage report for DS-6048fabd-2f6f-4ae8-95f2-67561ed9306f from datanode DatanodeRegistration(127.0.0.1:37029, datanodeUuid=0b84e536-2ecf-425a-9654-0468fd01af69, infoPort=40767, infoSecurePort=0, ipcPort=37527, storageInfo=lv=-57;cid=testClusterID;nsid=1936471837;c=1732188593930) 2024-11-21T11:29:54,490 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xed73446b60c11889 with lease ID 0x634e5b015af002b4: from storage DS-6048fabd-2f6f-4ae8-95f2-67561ed9306f node DatanodeRegistration(127.0.0.1:37029, datanodeUuid=0b84e536-2ecf-425a-9654-0468fd01af69, infoPort=40767, infoSecurePort=0, ipcPort=37527, storageInfo=lv=-57;cid=testClusterID;nsid=1936471837;c=1732188593930), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:29:54,490 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xed73446b60c11889 with lease ID 0x634e5b015af002b4: Processing first storage report for DS-837e23a5-1346-45de-87eb-3e2411d7b1dc from datanode DatanodeRegistration(127.0.0.1:37029, datanodeUuid=0b84e536-2ecf-425a-9654-0468fd01af69, infoPort=40767, infoSecurePort=0, ipcPort=37527, storageInfo=lv=-57;cid=testClusterID;nsid=1936471837;c=1732188593930) 2024-11-21T11:29:54,490 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xed73446b60c11889 with lease ID 0x634e5b015af002b4: from storage DS-837e23a5-1346-45de-87eb-3e2411d7b1dc node DatanodeRegistration(127.0.0.1:37029, datanodeUuid=0b84e536-2ecf-425a-9654-0468fd01af69, infoPort=40767, infoSecurePort=0, ipcPort=37527, storageInfo=lv=-57;cid=testClusterID;nsid=1936471837;c=1732188593930), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:29:54,546 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2d183c93{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/java.io.tmpdir/jetty-localhost-38123-hadoop-hdfs-3_4_1-tests_jar-_-any-1875646095405511243/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:29:54,546 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7154ca22{HTTP/1.1, (http/1.1)}{localhost:38123} 2024-11-21T11:29:54,546 INFO [Time-limited test {}] server.Server(415): Started @101738ms 2024-11-21T11:29:54,549 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-21T11:29:54,681 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/cluster_a9dd033b-a5fa-b6fb-cbc3-e91ffa8478c8/data/data3/current/BP-879865881-172.17.0.2-1732188593930/current, will proceed with Du for space computation calculation, 2024-11-21T11:29:54,681 WARN [Thread-466 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/cluster_a9dd033b-a5fa-b6fb-cbc3-e91ffa8478c8/data/data4/current/BP-879865881-172.17.0.2-1732188593930/current, will proceed with Du for space computation calculation, 2024-11-21T11:29:54,709 WARN [Thread-454 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T11:29:54,712 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xedc722af32a76b2 with lease ID 0x634e5b015af002b5: Processing first storage report for DS-512323ce-eaa0-44f5-b5ec-48bf9c0ca869 from datanode DatanodeRegistration(127.0.0.1:32953, datanodeUuid=cf7d3339-a6fc-42d0-9039-a40d2043b6e7, infoPort=44089, infoSecurePort=0, ipcPort=42735, storageInfo=lv=-57;cid=testClusterID;nsid=1936471837;c=1732188593930) 2024-11-21T11:29:54,712 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xedc722af32a76b2 with lease ID 0x634e5b015af002b5: from storage DS-512323ce-eaa0-44f5-b5ec-48bf9c0ca869 node DatanodeRegistration(127.0.0.1:32953, datanodeUuid=cf7d3339-a6fc-42d0-9039-a40d2043b6e7, infoPort=44089, infoSecurePort=0, ipcPort=42735, storageInfo=lv=-57;cid=testClusterID;nsid=1936471837;c=1732188593930), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:29:54,712 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xedc722af32a76b2 with lease ID 0x634e5b015af002b5: Processing first storage report for DS-50287e10-2b1c-42f5-9dda-2fe840c6a91c from datanode DatanodeRegistration(127.0.0.1:32953, datanodeUuid=cf7d3339-a6fc-42d0-9039-a40d2043b6e7, infoPort=44089, infoSecurePort=0, ipcPort=42735, storageInfo=lv=-57;cid=testClusterID;nsid=1936471837;c=1732188593930) 2024-11-21T11:29:54,712 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xedc722af32a76b2 with lease ID 0x634e5b015af002b5: from storage DS-50287e10-2b1c-42f5-9dda-2fe840c6a91c node DatanodeRegistration(127.0.0.1:32953, datanodeUuid=cf7d3339-a6fc-42d0-9039-a40d2043b6e7, infoPort=44089, infoSecurePort=0, ipcPort=42735, storageInfo=lv=-57;cid=testClusterID;nsid=1936471837;c=1732188593930), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:29:54,802 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10 2024-11-21T11:29:54,809 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/cluster_a9dd033b-a5fa-b6fb-cbc3-e91ffa8478c8/zookeeper_0, clientPort=62167, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/cluster_a9dd033b-a5fa-b6fb-cbc3-e91ffa8478c8/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/cluster_a9dd033b-a5fa-b6fb-cbc3-e91ffa8478c8/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-21T11:29:54,810 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62167 2024-11-21T11:29:54,810 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:29:54,812 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:29:54,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37029 is added to blk_1073741825_1001 (size=7) 2024-11-21T11:29:54,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32953 is added to blk_1073741825_1001 (size=7) 2024-11-21T11:29:54,833 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5 with version=8 2024-11-21T11:29:54,833 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/hbase-staging 2024-11-21T11:29:54,836 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7b462513bfc2:0 server-side Connection retries=45 2024-11-21T11:29:54,836 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T11:29:54,836 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T11:29:54,836 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T11:29:54,836 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T11:29:54,836 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T11:29:54,836 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T11:29:54,836 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T11:29:54,837 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36597 2024-11-21T11:29:54,839 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36597 connecting to ZooKeeper ensemble=127.0.0.1:62167 2024-11-21T11:29:54,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:365970x0, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T11:29:54,848 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36597-0x1013a4a4d040000 connected 2024-11-21T11:29:54,868 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:29:54,870 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:29:54,874 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:29:54,874 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5, hbase.cluster.distributed=false 2024-11-21T11:29:54,876 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-21T11:29:54,876 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36597 2024-11-21T11:29:54,877 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36597 2024-11-21T11:29:54,877 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36597 2024-11-21T11:29:54,877 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36597 2024-11-21T11:29:54,877 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36597 2024-11-21T11:29:54,896 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7b462513bfc2:0 server-side Connection retries=45 2024-11-21T11:29:54,896 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T11:29:54,896 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T11:29:54,896 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T11:29:54,896 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T11:29:54,896 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T11:29:54,897 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T11:29:54,897 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T11:29:54,898 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37469 2024-11-21T11:29:54,900 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37469 connecting to ZooKeeper ensemble=127.0.0.1:62167 2024-11-21T11:29:54,901 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:29:54,904 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:29:54,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:374690x0, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T11:29:54,914 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:374690x0, quorum=127.0.0.1:62167, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:29:54,914 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37469-0x1013a4a4d040001 connected 2024-11-21T11:29:54,914 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T11:29:54,916 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T11:29:54,918 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37469-0x1013a4a4d040001, quorum=127.0.0.1:62167, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-21T11:29:54,920 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37469-0x1013a4a4d040001, quorum=127.0.0.1:62167, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-21T11:29:54,932 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37469 2024-11-21T11:29:54,936 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37469 2024-11-21T11:29:54,942 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37469 2024-11-21T11:29:54,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37469 2024-11-21T11:29:54,944 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37469 2024-11-21T11:29:54,966 
DEBUG [M:0;7b462513bfc2:36597 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7b462513bfc2:36597 2024-11-21T11:29:54,974 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7b462513bfc2,36597,1732188594835 2024-11-21T11:29:54,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37469-0x1013a4a4d040001, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T11:29:54,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T11:29:54,978 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7b462513bfc2,36597,1732188594835 2024-11-21T11:29:54,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37469-0x1013a4a4d040001, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-21T11:29:54,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37469-0x1013a4a4d040001, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:54,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:54,981 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-21T11:29:54,982 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7b462513bfc2,36597,1732188594835 from backup master directory 2024-11-21T11:29:54,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7b462513bfc2,36597,1732188594835 2024-11-21T11:29:54,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T11:29:54,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37469-0x1013a4a4d040001, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T11:29:54,984 WARN [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-21T11:29:54,984 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7b462513bfc2,36597,1732188594835 2024-11-21T11:29:54,992 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/hbase.id] with ID: 627897d6-a520-40b3-af06-855f7be0c1a2 2024-11-21T11:29:54,992 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/.tmp/hbase.id 2024-11-21T11:29:55,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37029 is added to blk_1073741826_1002 (size=42) 2024-11-21T11:29:55,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32953 is added to blk_1073741826_1002 (size=42) 2024-11-21T11:29:55,003 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/.tmp/hbase.id]:[hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/hbase.id] 2024-11-21T11:29:55,028 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:29:55,028 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T11:29:55,029 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-21T11:29:55,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:55,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37469-0x1013a4a4d040001, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:55,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37029 is added to blk_1073741827_1003 (size=196) 2024-11-21T11:29:55,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32953 is added to blk_1073741827_1003 (size=196) 2024-11-21T11:29:55,049 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T11:29:55,050 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T11:29:55,050 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-21T11:29:55,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37029 is added to blk_1073741828_1004 (size=1189) 2024-11-21T11:29:55,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32953 is added to blk_1073741828_1004 (size=1189) 2024-11-21T11:29:55,060 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/data/master/store 2024-11-21T11:29:55,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37029 is added to blk_1073741829_1005 (size=34) 2024-11-21T11:29:55,071 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:29:55,071 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T11:29:55,071 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:29:55,071 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:29:55,071 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T11:29:55,071 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:29:55,071 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-21T11:29:55,071 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732188595071Disabling compacts and flushes for region at 1732188595071Disabling writes for close at 1732188595071Writing region close event to WAL at 1732188595071Closed at 1732188595071 2024-11-21T11:29:55,073 WARN [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/data/master/store/.initializing 2024-11-21T11:29:55,073 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/WALs/7b462513bfc2,36597,1732188594835 2024-11-21T11:29:55,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32953 is added to blk_1073741829_1005 (size=34) 2024-11-21T11:29:55,077 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b462513bfc2%2C36597%2C1732188594835, suffix=, logDir=hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/WALs/7b462513bfc2,36597,1732188594835, archiveDir=hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/oldWALs, maxLogs=10 2024-11-21T11:29:55,078 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C36597%2C1732188594835.1732188595078 2024-11-21T11:29:55,085 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/WALs/7b462513bfc2,36597,1732188594835/7b462513bfc2%2C36597%2C1732188594835.1732188595078 2024-11-21T11:29:55,086 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40767:40767),(127.0.0.1/127.0.0.1:44089:44089)] 2024-11-21T11:29:55,087 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T11:29:55,087 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:29:55,087 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:29:55,087 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:29:55,088 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:29:55,090 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): 
size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T11:29:55,090 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:55,091 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:29:55,091 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:29:55,093 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-21T11:29:55,093 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:55,093 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:29:55,093 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:29:55,096 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T11:29:55,096 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:55,096 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:29:55,096 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:29:55,098 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T11:29:55,098 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:55,098 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:29:55,099 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:29:55,100 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:29:55,100 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:29:55,102 DEBUG [master/7b462513bfc2:0:becomeActiveMaster 
{}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:29:55,102 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:29:55,103 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T11:29:55,104 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:29:55,107 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T11:29:55,107 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=781406, jitterRate=-0.006391569972038269}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T11:29:55,109 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732188595087Initializing all the Stores at 1732188595088 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188595088Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188595088Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188595088Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188595088Cleaning up temporary data from old regions at 1732188595102 (+14 ms)Region opened successfully at 1732188595109 (+7 ms) 2024-11-21T11:29:55,109 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T11:29:55,113 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1534bdb8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7b462513bfc2/172.17.0.2:0 2024-11-21T11:29:55,114 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-21T11:29:55,114 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T11:29:55,114 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T11:29:55,115 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T11:29:55,115 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-21T11:29:55,116 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T11:29:55,116 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T11:29:55,120 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-21T11:29:55,120 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-21T11:29:55,122 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-21T11:29:55,122 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T11:29:55,123 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-21T11:29:55,124 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-21T11:29:55,125 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T11:29:55,126 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-21T11:29:55,127 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-21T11:29:55,128 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, 
baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-21T11:29:55,130 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-21T11:29:55,132 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T11:29:55,134 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-21T11:29:55,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-21T11:29:55,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37469-0x1013a4a4d040001, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-21T11:29:55,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:55,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37469-0x1013a4a4d040001, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:55,136 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7b462513bfc2,36597,1732188594835, sessionid=0x1013a4a4d040000, setting cluster-up flag (Was=false) 2024-11-21T11:29:55,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:55,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37469-0x1013a4a4d040001, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:55,145 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-21T11:29:55,146 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7b462513bfc2,36597,1732188594835 2024-11-21T11:29:55,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:55,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37469-0x1013a4a4d040001, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:55,156 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, 
/hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-21T11:29:55,157 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7b462513bfc2,36597,1732188594835 2024-11-21T11:29:55,158 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T11:29:55,160 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T11:29:55,160 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T11:29:55,160 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-21T11:29:55,161 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7b462513bfc2,36597,1732188594835 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T11:29:55,162 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:29:55,162 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:29:55,162 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:29:55,163 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:29:55,163 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7b462513bfc2:0, corePoolSize=10, maxPoolSize=10 2024-11-21T11:29:55,163 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:55,163 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7b462513bfc2:0, corePoolSize=2, maxPoolSize=2 2024-11-21T11:29:55,163 DEBUG 
[master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:55,166 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732188625166 2024-11-21T11:29:55,166 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T11:29:55,166 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T11:29:55,166 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T11:29:55,166 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T11:29:55,166 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T11:29:55,166 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T11:29:55,166 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T11:29:55,167 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-21T11:29:55,167 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T11:29:55,167 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T11:29:55,167 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T11:29:55,167 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T11:29:55,168 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T11:29:55,168 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T11:29:55,168 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:55,168 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.large.0-1732188595168,5,FailOnTimeoutGroup] 2024-11-21T11:29:55,168 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T11:29:55,169 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.small.0-1732188595168,5,FailOnTimeoutGroup] 2024-11-21T11:29:55,169 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-21T11:29:55,169 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T11:29:55,169 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:55,169 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:55,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37029 is added to blk_1073741831_1007 (size=1321) 2024-11-21T11:29:55,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32953 is added to blk_1073741831_1007 (size=1321) 2024-11-21T11:29:55,177 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T11:29:55,178 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5 2024-11-21T11:29:55,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32953 is added to blk_1073741832_1008 (size=32) 2024-11-21T11:29:55,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37029 is added to blk_1073741832_1008 (size=32) 2024-11-21T11:29:55,201 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; 
minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:29:55,210 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T11:29:55,212 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T11:29:55,213 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:55,213 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:29:55,214 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T11:29:55,218 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T11:29:55,219 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:55,219 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:29:55,219 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T11:29:55,221 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T11:29:55,221 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:55,222 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:29:55,222 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T11:29:55,224 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T11:29:55,224 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:55,226 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:29:55,226 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T11:29:55,228 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/data/hbase/meta/1588230740 2024-11-21T11:29:55,228 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/data/hbase/meta/1588230740 2024-11-21T11:29:55,231 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T11:29:55,231 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T11:29:55,232 DEBUG [PEWorker-1 {}] 
regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-21T11:29:55,234 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T11:29:55,245 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T11:29:55,246 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=742132, jitterRate=-0.05633111298084259}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-21T11:29:55,248 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732188595201Initializing all the Stores at 1732188595203 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188595203Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188595208 (+5 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188595208Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188595208Cleaning up temporary data from old regions at 1732188595231 (+23 ms)Region opened successfully at 1732188595248 (+17 ms) 2024-11-21T11:29:55,248 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T11:29:55,248 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T11:29:55,248 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T11:29:55,249 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T11:29:55,249 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T11:29:55,249 INFO [RS:0;7b462513bfc2:37469 {}] regionserver.HRegionServer(746): ClusterId : 627897d6-a520-40b3-af06-855f7be0c1a2 2024-11-21T11:29:55,249 
DEBUG [RS:0;7b462513bfc2:37469 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T11:29:55,249 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T11:29:55,249 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732188595248Disabling compacts and flushes for region at 1732188595248Disabling writes for close at 1732188595249 (+1 ms)Writing region close event to WAL at 1732188595249Closed at 1732188595249 2024-11-21T11:29:55,251 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T11:29:55,251 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T11:29:55,252 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T11:29:55,252 DEBUG [RS:0;7b462513bfc2:37469 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T11:29:55,252 DEBUG [RS:0;7b462513bfc2:37469 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T11:29:55,254 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T11:29:55,255 DEBUG [RS:0;7b462513bfc2:37469 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T11:29:55,256 DEBUG [RS:0;7b462513bfc2:37469 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b7de06a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7b462513bfc2/172.17.0.2:0 2024-11-21T11:29:55,260 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T11:29:55,276 DEBUG [RS:0;7b462513bfc2:37469 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7b462513bfc2:37469 2024-11-21T11:29:55,276 INFO [RS:0;7b462513bfc2:37469 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T11:29:55,276 INFO [RS:0;7b462513bfc2:37469 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T11:29:55,276 DEBUG [RS:0;7b462513bfc2:37469 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-21T11:29:55,277 INFO [RS:0;7b462513bfc2:37469 {}] regionserver.HRegionServer(2659): reportForDuty to master=7b462513bfc2,36597,1732188594835 with port=37469, startcode=1732188594895 2024-11-21T11:29:55,278 DEBUG [RS:0;7b462513bfc2:37469 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T11:29:55,282 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47997, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T11:29:55,283 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36597 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7b462513bfc2,37469,1732188594895 2024-11-21T11:29:55,283 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36597 {}] master.ServerManager(517): Registering regionserver=7b462513bfc2,37469,1732188594895 2024-11-21T11:29:55,286 DEBUG [RS:0;7b462513bfc2:37469 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5 2024-11-21T11:29:55,286 DEBUG [RS:0;7b462513bfc2:37469 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35785 2024-11-21T11:29:55,286 DEBUG [RS:0;7b462513bfc2:37469 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T11:29:55,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-21T11:29:55,289 DEBUG [RS:0;7b462513bfc2:37469 {}] zookeeper.ZKUtil(111): regionserver:37469-0x1013a4a4d040001, quorum=127.0.0.1:62167, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7b462513bfc2,37469,1732188594895 2024-11-21T11:29:55,289 WARN [RS:0;7b462513bfc2:37469 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T11:29:55,289 INFO [RS:0;7b462513bfc2:37469 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-21T11:29:55,290 DEBUG [RS:0;7b462513bfc2:37469 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/WALs/7b462513bfc2,37469,1732188594895 2024-11-21T11:29:55,290 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7b462513bfc2,37469,1732188594895] 2024-11-21T11:29:55,295 INFO [RS:0;7b462513bfc2:37469 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T11:29:55,297 INFO [RS:0;7b462513bfc2:37469 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T11:29:55,297 INFO [RS:0;7b462513bfc2:37469 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T11:29:55,297 INFO [RS:0;7b462513bfc2:37469 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-21T11:29:55,298 INFO [RS:0;7b462513bfc2:37469 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T11:29:55,299 INFO [RS:0;7b462513bfc2:37469 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T11:29:55,299 INFO [RS:0;7b462513bfc2:37469 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:55,299 DEBUG [RS:0;7b462513bfc2:37469 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:55,299 DEBUG [RS:0;7b462513bfc2:37469 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:55,299 DEBUG [RS:0;7b462513bfc2:37469 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:55,299 DEBUG [RS:0;7b462513bfc2:37469 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:55,299 DEBUG [RS:0;7b462513bfc2:37469 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:55,299 DEBUG [RS:0;7b462513bfc2:37469 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7b462513bfc2:0, corePoolSize=2, maxPoolSize=2 2024-11-21T11:29:55,299 DEBUG [RS:0;7b462513bfc2:37469 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:55,299 DEBUG [RS:0;7b462513bfc2:37469 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:55,299 DEBUG [RS:0;7b462513bfc2:37469 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:55,300 DEBUG [RS:0;7b462513bfc2:37469 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:55,300 DEBUG [RS:0;7b462513bfc2:37469 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:55,300 DEBUG [RS:0;7b462513bfc2:37469 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:55,300 DEBUG [RS:0;7b462513bfc2:37469 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7b462513bfc2:0, corePoolSize=3, maxPoolSize=3 2024-11-21T11:29:55,300 DEBUG [RS:0;7b462513bfc2:37469 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0, corePoolSize=3, maxPoolSize=3 2024-11-21T11:29:55,300 INFO [RS:0;7b462513bfc2:37469 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-21T11:29:55,300 INFO [RS:0;7b462513bfc2:37469 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:55,300 INFO [RS:0;7b462513bfc2:37469 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:55,300 INFO [RS:0;7b462513bfc2:37469 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:55,300 INFO [RS:0;7b462513bfc2:37469 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:55,300 INFO [RS:0;7b462513bfc2:37469 {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,37469,1732188594895-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T11:29:55,318 INFO [RS:0;7b462513bfc2:37469 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T11:29:55,319 INFO [RS:0;7b462513bfc2:37469 {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,37469,1732188594895-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:55,319 INFO [RS:0;7b462513bfc2:37469 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:55,319 INFO [RS:0;7b462513bfc2:37469 {}] regionserver.Replication(171): 7b462513bfc2,37469,1732188594895 started 2024-11-21T11:29:55,334 INFO [RS:0;7b462513bfc2:37469 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:55,334 INFO [RS:0;7b462513bfc2:37469 {}] regionserver.HRegionServer(1482): Serving as 7b462513bfc2,37469,1732188594895, RpcServer on 7b462513bfc2/172.17.0.2:37469, sessionid=0x1013a4a4d040001 2024-11-21T11:29:55,334 DEBUG [RS:0;7b462513bfc2:37469 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T11:29:55,334 DEBUG [RS:0;7b462513bfc2:37469 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7b462513bfc2,37469,1732188594895 2024-11-21T11:29:55,334 DEBUG [RS:0;7b462513bfc2:37469 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7b462513bfc2,37469,1732188594895' 2024-11-21T11:29:55,334 DEBUG [RS:0;7b462513bfc2:37469 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-21T11:29:55,335 DEBUG [RS:0;7b462513bfc2:37469 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-21T11:29:55,336 DEBUG [RS:0;7b462513bfc2:37469 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T11:29:55,336 DEBUG [RS:0;7b462513bfc2:37469 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T11:29:55,336 DEBUG [RS:0;7b462513bfc2:37469 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7b462513bfc2,37469,1732188594895 2024-11-21T11:29:55,336 DEBUG [RS:0;7b462513bfc2:37469 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7b462513bfc2,37469,1732188594895' 2024-11-21T11:29:55,336 DEBUG [RS:0;7b462513bfc2:37469 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-21T11:29:55,336 DEBUG 
[RS:0;7b462513bfc2:37469 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-21T11:29:55,337 DEBUG [RS:0;7b462513bfc2:37469 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T11:29:55,337 INFO [RS:0;7b462513bfc2:37469 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T11:29:55,337 INFO [RS:0;7b462513bfc2:37469 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T11:29:55,411 WARN [7b462513bfc2:36597 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-21T11:29:55,440 INFO [RS:0;7b462513bfc2:37469 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b462513bfc2%2C37469%2C1732188594895, suffix=, logDir=hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/WALs/7b462513bfc2,37469,1732188594895, archiveDir=hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/oldWALs, maxLogs=32 2024-11-21T11:29:55,442 INFO [RS:0;7b462513bfc2:37469 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C37469%2C1732188594895.1732188595442 2024-11-21T11:29:55,457 INFO [RS:0;7b462513bfc2:37469 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/WALs/7b462513bfc2,37469,1732188594895/7b462513bfc2%2C37469%2C1732188594895.1732188595442 2024-11-21T11:29:55,464 DEBUG [RS:0;7b462513bfc2:37469 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44089:44089),(127.0.0.1/127.0.0.1:40767:40767)] 2024-11-21T11:29:55,661 DEBUG [7b462513bfc2:36597 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T11:29:55,662 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7b462513bfc2,37469,1732188594895 2024-11-21T11:29:55,664 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7b462513bfc2,37469,1732188594895, state=OPENING 2024-11-21T11:29:55,667 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T11:29:55,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:55,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37469-0x1013a4a4d040001, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:55,669 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:29:55,669 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T11:29:55,669 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=7b462513bfc2,37469,1732188594895}] 2024-11-21T11:29:55,670 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:29:55,823 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T11:29:55,827 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49883, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T11:29:55,833 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T11:29:55,833 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-21T11:29:55,835 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b462513bfc2%2C37469%2C1732188594895.meta, suffix=.meta, logDir=hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/WALs/7b462513bfc2,37469,1732188594895, archiveDir=hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/oldWALs, maxLogs=32 2024-11-21T11:29:55,838 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C37469%2C1732188594895.meta.1732188595837.meta 2024-11-21T11:29:55,863 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/WALs/7b462513bfc2,37469,1732188594895/7b462513bfc2%2C37469%2C1732188594895.meta.1732188595837.meta 2024-11-21T11:29:55,865 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40767:40767),(127.0.0.1/127.0.0.1:44089:44089)] 2024-11-21T11:29:55,868 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T11:29:55,869 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T11:29:55,869 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T11:29:55,869 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-21T11:29:55,869 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T11:29:55,869 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:29:55,870 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T11:29:55,870 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T11:29:55,873 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T11:29:55,874 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T11:29:55,874 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:55,875 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:29:55,875 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T11:29:55,876 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T11:29:55,876 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:55,876 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:29:55,876 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T11:29:55,877 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T11:29:55,877 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:55,878 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:29:55,878 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T11:29:55,879 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T11:29:55,879 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:55,879 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-21T11:29:55,880 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T11:29:55,881 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/data/hbase/meta/1588230740 2024-11-21T11:29:55,882 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/data/hbase/meta/1588230740 2024-11-21T11:29:55,884 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T11:29:55,884 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T11:29:55,885 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-21T11:29:55,887 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T11:29:55,888 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=785189, jitterRate=-0.0015811026096343994}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-21T11:29:55,889 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T11:29:55,890 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732188595870Writing region info on filesystem at 1732188595870Initializing all the Stores at 1732188595872 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188595872Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188595872Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188595872Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188595872Cleaning up temporary data from old regions at 1732188595884 (+12 ms)Running coprocessor post-open hooks at 1732188595889 (+5 ms)Region opened successfully at 1732188595890 (+1 ms) 2024-11-21T11:29:55,892 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732188595823 2024-11-21T11:29:55,896 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T11:29:55,896 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T11:29:55,897 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7b462513bfc2,37469,1732188594895 2024-11-21T11:29:55,899 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7b462513bfc2,37469,1732188594895, state=OPEN 2024-11-21T11:29:55,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37469-0x1013a4a4d040001, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-21T11:29:55,906 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:29:55,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-21T11:29:55,906 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:29:55,907 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7b462513bfc2,37469,1732188594895 2024-11-21T11:29:55,910 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T11:29:55,910 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7b462513bfc2,37469,1732188594895 in 238 msec 2024-11-21T11:29:55,913 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T11:29:55,914 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 658 msec 2024-11-21T11:29:55,915 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T11:29:55,915 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T11:29:55,917 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T11:29:55,917 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7b462513bfc2,37469,1732188594895, seqNum=-1] 2024-11-21T11:29:55,917 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T11:29:55,919 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39981, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T11:29:55,938 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 767 msec 2024-11-21T11:29:55,938 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732188595938, completionTime=-1 2024-11-21T11:29:55,938 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T11:29:55,938 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-21T11:29:55,941 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-21T11:29:55,941 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732188655941 2024-11-21T11:29:55,941 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732188715941 2024-11-21T11:29:55,941 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-21T11:29:55,941 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,36597,1732188594835-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:55,942 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,36597,1732188594835-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:55,942 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,36597,1732188594835-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:55,942 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7b462513bfc2:36597, period=300000, unit=MILLISECONDS is enabled. 
2024-11-21T11:29:55,942 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:55,944 DEBUG [master/7b462513bfc2:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T11:29:55,948 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:55,954 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.970sec 2024-11-21T11:29:55,954 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T11:29:55,954 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T11:29:55,954 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T11:29:55,954 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-21T11:29:55,954 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T11:29:55,954 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,36597,1732188594835-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T11:29:55,954 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,36597,1732188594835-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T11:29:55,958 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T11:29:55,958 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T11:29:55,959 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,36597,1732188594835-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-21T11:29:56,050 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@205bfd42, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T11:29:56,050 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7b462513bfc2,36597,-1 for getting cluster id 2024-11-21T11:29:56,051 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T11:29:56,053 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '627897d6-a520-40b3-af06-855f7be0c1a2' 2024-11-21T11:29:56,053 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T11:29:56,054 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "627897d6-a520-40b3-af06-855f7be0c1a2" 2024-11-21T11:29:56,054 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30b13da9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T11:29:56,054 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7b462513bfc2,36597,-1] 2024-11-21T11:29:56,055 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T11:29:56,055 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:29:56,057 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52822, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T11:29:56,058 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e5d9cb3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T11:29:56,059 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T11:29:56,060 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7b462513bfc2,37469,1732188594895, seqNum=-1] 2024-11-21T11:29:56,060 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T11:29:56,062 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51724, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T11:29:56,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7b462513bfc2,36597,1732188594835 2024-11-21T11:29:56,066 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:29:56,069 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-21T11:29:56,069 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T11:29:56,070 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T11:29:56,070 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T11:29:56,070 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:29:56,070 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:29:56,070 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T11:29:56,070 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T11:29:56,070 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=295137098, stopped=false 2024-11-21T11:29:56,070 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7b462513bfc2,36597,1732188594835 2024-11-21T11:29:56,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-21T11:29:56,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37469-0x1013a4a4d040001, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-21T11:29:56,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:56,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37469-0x1013a4a4d040001, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:56,074 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T11:29:56,074 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T11:29:56,075 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T11:29:56,075 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:29:56,075 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37469-0x1013a4a4d040001, quorum=127.0.0.1:62167, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:29:56,075 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:29:56,075 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7b462513bfc2,37469,1732188594895' ***** 2024-11-21T11:29:56,075 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T11:29:56,075 INFO [RS:0;7b462513bfc2:37469 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T11:29:56,075 INFO [RS:0;7b462513bfc2:37469 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T11:29:56,075 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T11:29:56,075 INFO [RS:0;7b462513bfc2:37469 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T11:29:56,076 INFO [RS:0;7b462513bfc2:37469 {}] regionserver.HRegionServer(959): stopping server 7b462513bfc2,37469,1732188594895 2024-11-21T11:29:56,076 INFO [RS:0;7b462513bfc2:37469 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T11:29:56,076 INFO [RS:0;7b462513bfc2:37469 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7b462513bfc2:37469. 2024-11-21T11:29:56,076 DEBUG [RS:0;7b462513bfc2:37469 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T11:29:56,076 DEBUG [RS:0;7b462513bfc2:37469 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:29:56,076 INFO [RS:0;7b462513bfc2:37469 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
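[Editor's note] The two call stacks above both bottom out in AbstractTestLogRolling.tearDown -> HBaseTestingUtil.shutdownMiniCluster, i.e. the standard JUnit 4 teardown of the single-process minicluster whose startup appears earlier in this log. For reference only, a minimal sketch of that lifecycle follows; the utility class and option builder match the HBase 3.x test APIs visible in the trace, but the surrounding test class is an illustrative assumption, not code from this run.

```java
// Sketch only: the minicluster lifecycle implied by the stack traces above
// (HBase 3.x test utilities, JUnit 4). The test class itself is hypothetical.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Mirrors the option string logged by HBaseTestingUtil(805) in this run:
    // 1 master, 1 region server, 2 HDFS data nodes.
    util.startMiniCluster(StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .build());
  }

  @Test
  public void testSomething() throws Exception {
    // test body elided
  }

  @After
  public void tearDown() throws Exception {
    // Same call as in the trace: closes the shared async connection, then
    // shuts down HBase, the mini DFS cluster and the mini ZK cluster.
    util.shutdownMiniCluster();
  }
}
```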
2024-11-21T11:29:56,076 INFO [RS:0;7b462513bfc2:37469 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T11:29:56,076 INFO [RS:0;7b462513bfc2:37469 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-21T11:29:56,076 INFO [RS:0;7b462513bfc2:37469 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T11:29:56,077 INFO [RS:0;7b462513bfc2:37469 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-21T11:29:56,077 DEBUG [RS:0;7b462513bfc2:37469 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-21T11:29:56,077 DEBUG [RS:0;7b462513bfc2:37469 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T11:29:56,077 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T11:29:56,077 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T11:29:56,077 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T11:29:56,077 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T11:29:56,077 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T11:29:56,077 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-21T11:29:56,104 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/data/hbase/meta/1588230740/.tmp/ns/162a6b08dbe7426db9e22a5a0fad99b8 is 43, key is default/ns:d/1732188595920/Put/seqid=0 2024-11-21T11:29:56,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32953 is added to blk_1073741835_1011 (size=5153) 2024-11-21T11:29:56,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37029 is added to blk_1073741835_1011 (size=5153) 2024-11-21T11:29:56,112 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/data/hbase/meta/1588230740/.tmp/ns/162a6b08dbe7426db9e22a5a0fad99b8 2024-11-21T11:29:56,122 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/data/hbase/meta/1588230740/.tmp/ns/162a6b08dbe7426db9e22a5a0fad99b8 as hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/data/hbase/meta/1588230740/ns/162a6b08dbe7426db9e22a5a0fad99b8 2024-11-21T11:29:56,131 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/data/hbase/meta/1588230740/ns/162a6b08dbe7426db9e22a5a0fad99b8, entries=2, sequenceid=6, filesize=5.0 K 2024-11-21T11:29:56,133 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 56ms, sequenceid=6, compaction requested=false 2024-11-21T11:29:56,133 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-21T11:29:56,144 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T11:29:56,145 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T11:29:56,145 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T11:29:56,146 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732188596077Running coprocessor pre-close hooks at 1732188596077Disabling compacts and flushes for region at 1732188596077Disabling writes for close at 1732188596077Obtaining lock to block concurrent updates at 1732188596078 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732188596078Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732188596078Flushing stores of hbase:meta,,1.1588230740 at 1732188596079 (+1 ms)Flushing 1588230740/ns: creating writer at 1732188596079Flushing 1588230740/ns: appending metadata at 1732188596103 (+24 ms)Flushing 1588230740/ns: closing flushed file at 1732188596103Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a9e0859: reopening flushed file at 1732188596121 (+18 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 56ms, sequenceid=6, compaction requested=false at 1732188596133 (+12 ms)Writing region close event to WAL at 1732188596137 (+4 ms)Running coprocessor post-close hooks at 1732188596145 (+8 ms)Closed at 1732188596145 2024-11-21T11:29:56,146 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T11:29:56,277 INFO [RS:0;7b462513bfc2:37469 {}] regionserver.HRegionServer(976): stopping server 7b462513bfc2,37469,1732188594895; all regions closed. 
2024-11-21T11:29:56,278 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:56,278 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:56,278 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:56,278 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:56,278 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:56,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32953 is added to blk_1073741834_1010 (size=1152) 2024-11-21T11:29:56,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37029 is added to blk_1073741834_1010 (size=1152) 2024-11-21T11:29:56,284 DEBUG [RS:0;7b462513bfc2:37469 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/oldWALs 2024-11-21T11:29:56,285 INFO [RS:0;7b462513bfc2:37469 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7b462513bfc2%2C37469%2C1732188594895.meta:.meta(num 1732188595837) 2024-11-21T11:29:56,285 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:56,285 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:56,285 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:56,285 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:56,285 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:56,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37029 is added to blk_1073741833_1009 (size=93) 2024-11-21T11:29:56,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32953 is added to blk_1073741833_1009 (size=93) 2024-11-21T11:29:56,290 DEBUG [RS:0;7b462513bfc2:37469 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/oldWALs 2024-11-21T11:29:56,290 INFO [RS:0;7b462513bfc2:37469 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7b462513bfc2%2C37469%2C1732188594895:(num 1732188595442) 2024-11-21T11:29:56,290 DEBUG [RS:0;7b462513bfc2:37469 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:29:56,290 INFO [RS:0;7b462513bfc2:37469 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T11:29:56,291 INFO [RS:0;7b462513bfc2:37469 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T11:29:56,291 INFO [RS:0;7b462513bfc2:37469 {}] hbase.ChoreService(370): Chore service for: regionserver/7b462513bfc2:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-21T11:29:56,291 INFO [RS:0;7b462513bfc2:37469 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T11:29:56,291 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
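[Editor's note] In the entries above the region server's FSHLog instances are closed and the remaining WAL files are moved to the oldWALs directory, after which the log roller thread exits. Since this run comes from TestLogRolling, a short sketch of how a test can request a roll explicitly may be useful; it uses the public Admin API, but the way the ServerName is looked up from the mini cluster here is an assumption for illustration, not something taken from this log.

```java
// Sketch only: forcing a WAL roll on a specific region server through the
// public Admin API. How the ServerName is obtained is illustrative.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;

public final class WalRollSketch {
  static void rollWalOnFirstRegionServer(HBaseTestingUtil util) throws Exception {
    // Assumed lookup of the first (and, in this run, only) region server.
    ServerName server =
        util.getHBaseCluster().getRegionServer(0).getServerName();
    try (Admin admin = util.getConnection().getAdmin()) {
      // Asks that region server to close its current WAL writer and open a
      // new one; rolled files are later archived under oldWALs.
      admin.rollWALWriter(server);
    }
  }
}
```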
2024-11-21T11:29:56,291 INFO [RS:0;7b462513bfc2:37469 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37469 2024-11-21T11:29:56,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37469-0x1013a4a4d040001, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7b462513bfc2,37469,1732188594895 2024-11-21T11:29:56,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-21T11:29:56,294 INFO [RS:0;7b462513bfc2:37469 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T11:29:56,295 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7b462513bfc2,37469,1732188594895] 2024-11-21T11:29:56,297 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7b462513bfc2,37469,1732188594895 already deleted, retry=false 2024-11-21T11:29:56,297 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7b462513bfc2,37469,1732188594895 expired; onlineServers=0 2024-11-21T11:29:56,297 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7b462513bfc2,36597,1732188594835' ***** 2024-11-21T11:29:56,297 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T11:29:56,297 INFO [M:0;7b462513bfc2:36597 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T11:29:56,297 INFO [M:0;7b462513bfc2:36597 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T11:29:56,297 DEBUG [M:0;7b462513bfc2:36597 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T11:29:56,297 DEBUG [M:0;7b462513bfc2:36597 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T11:29:56,297 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-21T11:29:56,297 DEBUG [master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.small.0-1732188595168 {}] cleaner.HFileCleaner(306): Exit Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.small.0-1732188595168,5,FailOnTimeoutGroup] 2024-11-21T11:29:56,297 DEBUG [master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.large.0-1732188595168 {}] cleaner.HFileCleaner(306): Exit Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.large.0-1732188595168,5,FailOnTimeoutGroup] 2024-11-21T11:29:56,297 INFO [M:0;7b462513bfc2:36597 {}] hbase.ChoreService(370): Chore service for: master/7b462513bfc2:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T11:29:56,298 INFO [M:0;7b462513bfc2:36597 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T11:29:56,298 DEBUG [M:0;7b462513bfc2:36597 {}] master.HMaster(1795): Stopping service threads 2024-11-21T11:29:56,298 INFO [M:0;7b462513bfc2:36597 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T11:29:56,298 INFO [M:0;7b462513bfc2:36597 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T11:29:56,298 INFO [M:0;7b462513bfc2:36597 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T11:29:56,298 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-21T11:29:56,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-21T11:29:56,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:56,301 DEBUG [M:0;7b462513bfc2:36597 {}] zookeeper.ZKUtil(347): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-21T11:29:56,301 WARN [M:0;7b462513bfc2:36597 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-21T11:29:56,302 INFO [M:0;7b462513bfc2:36597 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/.lastflushedseqids 2024-11-21T11:29:56,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32953 is added to blk_1073741836_1012 (size=99) 2024-11-21T11:29:56,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37029 is added to blk_1073741836_1012 (size=99) 2024-11-21T11:29:56,309 INFO [M:0;7b462513bfc2:36597 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T11:29:56,309 INFO [M:0;7b462513bfc2:36597 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T11:29:56,309 DEBUG [M:0;7b462513bfc2:36597 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T11:29:56,309 INFO [M:0;7b462513bfc2:36597 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:29:56,309 DEBUG [M:0;7b462513bfc2:36597 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:29:56,310 DEBUG [M:0;7b462513bfc2:36597 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T11:29:56,310 DEBUG [M:0;7b462513bfc2:36597 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:29:56,310 INFO [M:0;7b462513bfc2:36597 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-21T11:29:56,330 DEBUG [M:0;7b462513bfc2:36597 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1bcba73c5c2743f6b74215fb96762991 is 82, key is hbase:meta,,1/info:regioninfo/1732188595897/Put/seqid=0 2024-11-21T11:29:56,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37029 is added to blk_1073741837_1013 (size=5672) 2024-11-21T11:29:56,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32953 is added to blk_1073741837_1013 (size=5672) 2024-11-21T11:29:56,337 INFO [M:0;7b462513bfc2:36597 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1bcba73c5c2743f6b74215fb96762991 2024-11-21T11:29:56,362 DEBUG [M:0;7b462513bfc2:36597 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/420a689b214d4a61b70ab960ac2db1ba is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732188595926/Put/seqid=0 2024-11-21T11:29:56,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32953 is added to blk_1073741838_1014 (size=5275) 2024-11-21T11:29:56,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37029 is added to blk_1073741838_1014 (size=5275) 2024-11-21T11:29:56,369 INFO [M:0;7b462513bfc2:36597 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/420a689b214d4a61b70ab960ac2db1ba 2024-11-21T11:29:56,391 DEBUG [M:0;7b462513bfc2:36597 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a60aca43bee840cb96a6b954f6b7432d is 69, key is 7b462513bfc2,37469,1732188594895/rs:state/1732188595284/Put/seqid=0 2024-11-21T11:29:56,396 INFO [RS:0;7b462513bfc2:37469 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T11:29:56,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:37469-0x1013a4a4d040001, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:29:56,396 INFO [RS:0;7b462513bfc2:37469 {}] regionserver.HRegionServer(1031): Exiting; stopping=7b462513bfc2,37469,1732188594895; zookeeper connection closed. 2024-11-21T11:29:56,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37469-0x1013a4a4d040001, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:29:56,396 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@34df3de {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@34df3de 2024-11-21T11:29:56,396 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T11:29:56,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32953 is added to blk_1073741839_1015 (size=5156) 2024-11-21T11:29:56,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37029 is added to blk_1073741839_1015 (size=5156) 2024-11-21T11:29:56,399 INFO [M:0;7b462513bfc2:36597 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a60aca43bee840cb96a6b954f6b7432d 2024-11-21T11:29:56,422 DEBUG [M:0;7b462513bfc2:36597 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fe1a9691959149e3aca300263004eb6d is 52, key is load_balancer_on/state:d/1732188596067/Put/seqid=0 2024-11-21T11:29:56,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32953 is added to blk_1073741840_1016 (size=5056) 2024-11-21T11:29:56,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37029 is added to blk_1073741840_1016 (size=5056) 2024-11-21T11:29:56,428 INFO [M:0;7b462513bfc2:36597 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fe1a9691959149e3aca300263004eb6d 2024-11-21T11:29:56,436 DEBUG [M:0;7b462513bfc2:36597 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1bcba73c5c2743f6b74215fb96762991 as hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1bcba73c5c2743f6b74215fb96762991 2024-11-21T11:29:56,443 INFO [M:0;7b462513bfc2:36597 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1bcba73c5c2743f6b74215fb96762991, entries=8, sequenceid=29, filesize=5.5 K 2024-11-21T11:29:56,444 DEBUG [M:0;7b462513bfc2:36597 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/420a689b214d4a61b70ab960ac2db1ba as hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/420a689b214d4a61b70ab960ac2db1ba 2024-11-21T11:29:56,450 INFO [M:0;7b462513bfc2:36597 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/420a689b214d4a61b70ab960ac2db1ba, entries=3, sequenceid=29, filesize=5.2 K 2024-11-21T11:29:56,451 DEBUG [M:0;7b462513bfc2:36597 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a60aca43bee840cb96a6b954f6b7432d as hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a60aca43bee840cb96a6b954f6b7432d 2024-11-21T11:29:56,459 INFO [M:0;7b462513bfc2:36597 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a60aca43bee840cb96a6b954f6b7432d, entries=1, sequenceid=29, filesize=5.0 K 2024-11-21T11:29:56,461 DEBUG [M:0;7b462513bfc2:36597 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fe1a9691959149e3aca300263004eb6d as hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fe1a9691959149e3aca300263004eb6d 2024-11-21T11:29:56,468 INFO [M:0;7b462513bfc2:36597 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35785/user/jenkins/test-data/2fec6651-e78c-10f4-7cd2-2db34ebd53f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fe1a9691959149e3aca300263004eb6d, entries=1, sequenceid=29, filesize=4.9 K 2024-11-21T11:29:56,470 INFO [M:0;7b462513bfc2:36597 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 160ms, sequenceid=29, compaction requested=false 2024-11-21T11:29:56,472 INFO [M:0;7b462513bfc2:36597 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:29:56,472 DEBUG [M:0;7b462513bfc2:36597 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732188596309Disabling compacts and flushes for region at 1732188596309Disabling writes for close at 1732188596310 (+1 ms)Obtaining lock to block concurrent updates at 1732188596310Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732188596310Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732188596310Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732188596311 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732188596311Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732188596329 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732188596329Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732188596345 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732188596361 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732188596361Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732188596375 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732188596391 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732188596391Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732188596406 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732188596421 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732188596421Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3c9cf9e1: reopening flushed file at 1732188596435 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6a447804: reopening flushed file at 1732188596443 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7ef5428: reopening flushed file at 1732188596450 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3cd75ea5: reopening flushed file at 1732188596459 (+9 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 160ms, sequenceid=29, compaction requested=false at 1732188596470 (+11 ms)Writing region close event to WAL at 1732188596472 (+2 ms)Closed at 1732188596472 2024-11-21T11:29:56,472 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:56,473 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:56,473 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:56,473 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:56,473 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:29:56,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32953 is added to blk_1073741830_1006 (size=10311) 2024-11-21T11:29:56,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37029 is added to blk_1073741830_1006 (size=10311) 2024-11-21T11:29:56,476 INFO [M:0;7b462513bfc2:36597 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-21T11:29:56,477 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-21T11:29:56,477 INFO [M:0;7b462513bfc2:36597 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36597 2024-11-21T11:29:56,477 INFO [M:0;7b462513bfc2:36597 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T11:29:56,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:29:56,579 INFO [M:0;7b462513bfc2:36597 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T11:29:56,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36597-0x1013a4a4d040000, quorum=127.0.0.1:62167, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:29:56,584 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2d183c93{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:29:56,585 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7154ca22{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:29:56,585 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:29:56,585 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b44e274{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:29:56,585 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c1be80f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/hadoop.log.dir/,STOPPED} 2024-11-21T11:29:56,587 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T11:29:56,587 WARN [BP-879865881-172.17.0.2-1732188593930 heartbeating to localhost/127.0.0.1:35785 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T11:29:56,587 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T11:29:56,587 WARN [BP-879865881-172.17.0.2-1732188593930 heartbeating to localhost/127.0.0.1:35785 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-879865881-172.17.0.2-1732188593930 (Datanode Uuid cf7d3339-a6fc-42d0-9039-a40d2043b6e7) service to localhost/127.0.0.1:35785 2024-11-21T11:29:56,588 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/cluster_a9dd033b-a5fa-b6fb-cbc3-e91ffa8478c8/data/data3/current/BP-879865881-172.17.0.2-1732188593930 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:29:56,588 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/cluster_a9dd033b-a5fa-b6fb-cbc3-e91ffa8478c8/data/data4/current/BP-879865881-172.17.0.2-1732188593930 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:29:56,589 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T11:29:56,593 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@542ee468{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:29:56,593 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@75c88313{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:29:56,593 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:29:56,593 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4edee9ab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:29:56,594 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@eab7acc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/hadoop.log.dir/,STOPPED} 2024-11-21T11:29:56,603 WARN [BP-879865881-172.17.0.2-1732188593930 heartbeating to localhost/127.0.0.1:35785 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T11:29:56,603 WARN [BP-879865881-172.17.0.2-1732188593930 heartbeating to localhost/127.0.0.1:35785 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-879865881-172.17.0.2-1732188593930 (Datanode Uuid 0b84e536-2ecf-425a-9654-0468fd01af69) service to localhost/127.0.0.1:35785 2024-11-21T11:29:56,604 WARN 
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/cluster_a9dd033b-a5fa-b6fb-cbc3-e91ffa8478c8/data/data1/current/BP-879865881-172.17.0.2-1732188593930 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:29:56,604 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/cluster_a9dd033b-a5fa-b6fb-cbc3-e91ffa8478c8/data/data2/current/BP-879865881-172.17.0.2-1732188593930 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:29:56,604 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-21T11:29:56,604 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T11:29:56,605 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T11:29:56,614 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@49a88a00{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T11:29:56,615 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4e4256d4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:29:56,615 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:29:56,616 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3197ca45{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:29:56,616 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f681677{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/hadoop.log.dir/,STOPPED} 2024-11-21T11:29:56,625 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-21T11:29:56,652 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-21T11:29:56,652 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T11:29:56,652 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/hadoop.log.dir so I do NOT create it in target/test-data/2e243797-0e63-958f-14d9-eece334c3056 2024-11-21T11:29:56,652 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a17a5d8c-6c67-1e90-0462-e9b7db287d10/hadoop.tmp.dir so I do NOT create it in target/test-data/2e243797-0e63-958f-14d9-eece334c3056 2024-11-21T11:29:56,652 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a, deleteOnExit=true 2024-11-21T11:29:56,652 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T11:29:56,653 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/test.cache.data in system properties and HBase conf 2024-11-21T11:29:56,653 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T11:29:56,653 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/hadoop.log.dir in system properties and HBase conf 2024-11-21T11:29:56,653 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T11:29:56,653 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-21T11:29:56,653 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T11:29:56,654 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-21T11:29:56,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T11:29:56,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T11:29:56,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T11:29:56,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T11:29:56,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T11:29:56,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T11:29:56,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T11:29:56,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T11:29:56,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T11:29:56,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/nfs.dump.dir in system properties and HBase conf 2024-11-21T11:29:56,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/java.io.tmpdir in system properties and HBase conf 2024-11-21T11:29:56,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T11:29:56,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T11:29:56,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T11:29:56,677 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-21T11:29:56,768 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:29:56,776 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T11:29:56,781 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T11:29:56,781 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T11:29:56,782 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T11:29:56,783 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:29:56,784 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a488aac{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/hadoop.log.dir/,AVAILABLE} 2024-11-21T11:29:56,785 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ed3a961{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T11:29:56,912 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7cd2a640{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/java.io.tmpdir/jetty-localhost-38767-hadoop-hdfs-3_4_1-tests_jar-_-any-10928726156751467335/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T11:29:56,913 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@64b7b556{HTTP/1.1, (http/1.1)}{localhost:38767} 2024-11-21T11:29:56,913 INFO [Time-limited test {}] server.Server(415): Started @104105ms 2024-11-21T11:29:56,935 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-21T11:29:57,019 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:29:57,023 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T11:29:57,023 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T11:29:57,023 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T11:29:57,023 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T11:29:57,024 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@257cf4bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/hadoop.log.dir/,AVAILABLE} 2024-11-21T11:29:57,024 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30e7c448{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T11:29:57,146 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@ab5393f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/java.io.tmpdir/jetty-localhost-39399-hadoop-hdfs-3_4_1-tests_jar-_-any-14083658187338391418/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:29:57,147 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@47f82e76{HTTP/1.1, (http/1.1)}{localhost:39399} 2024-11-21T11:29:57,147 INFO [Time-limited test {}] server.Server(415): Started @104339ms 2024-11-21T11:29:57,149 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T11:29:57,190 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:29:57,195 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T11:29:57,196 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T11:29:57,196 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T11:29:57,196 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T11:29:57,197 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69bbaec1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/hadoop.log.dir/,AVAILABLE} 2024-11-21T11:29:57,198 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7720beab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T11:29:57,286 WARN [Thread-658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data1/current/BP-677158924-172.17.0.2-1732188596698/current, will proceed with Du for space computation calculation, 2024-11-21T11:29:57,286 WARN [Thread-659 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data2/current/BP-677158924-172.17.0.2-1732188596698/current, will proceed with Du for space computation calculation, 2024-11-21T11:29:57,303 INFO [regionserver/7b462513bfc2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T11:29:57,312 WARN [Thread-637 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-21T11:29:57,316 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfbaaf9027298575a with lease ID 0x2edf52267c6427b3: Processing first storage report for DS-0e147be5-8c67-4fb3-8660-053a2b6d764e from datanode DatanodeRegistration(127.0.0.1:44593, datanodeUuid=41546d6a-8a61-4f57-a920-c2cab26d2b43, infoPort=34243, infoSecurePort=0, ipcPort=45225, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698) 2024-11-21T11:29:57,316 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfbaaf9027298575a with lease ID 0x2edf52267c6427b3: from storage DS-0e147be5-8c67-4fb3-8660-053a2b6d764e node DatanodeRegistration(127.0.0.1:44593, datanodeUuid=41546d6a-8a61-4f57-a920-c2cab26d2b43, infoPort=34243, infoSecurePort=0, ipcPort=45225, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-21T11:29:57,316 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfbaaf9027298575a with lease ID 0x2edf52267c6427b3: Processing first storage report for DS-93ce3843-003c-44cc-95b5-4c31a8886f4a from datanode DatanodeRegistration(127.0.0.1:44593, datanodeUuid=41546d6a-8a61-4f57-a920-c2cab26d2b43, infoPort=34243, infoSecurePort=0, ipcPort=45225, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698) 2024-11-21T11:29:57,316 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfbaaf9027298575a with lease ID 0x2edf52267c6427b3: from storage DS-93ce3843-003c-44cc-95b5-4c31a8886f4a node DatanodeRegistration(127.0.0.1:44593, datanodeUuid=41546d6a-8a61-4f57-a920-c2cab26d2b43, infoPort=34243, infoSecurePort=0, ipcPort=45225, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:29:57,341 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@21d5e4af{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/java.io.tmpdir/jetty-localhost-34553-hadoop-hdfs-3_4_1-tests_jar-_-any-13707739538625534723/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:29:57,342 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@145c0180{HTTP/1.1, (http/1.1)}{localhost:34553} 2024-11-21T11:29:57,342 INFO [Time-limited test {}] server.Server(415): Started @104534ms 2024-11-21T11:29:57,344 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
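The dfsUsed warnings and DataNode web contexts above come from the mini HDFS cluster the test utility brings up under cluster_904a987f-99de-5a48-6261-717b6374433a. A minimal sketch of requesting such a cluster directly, assuming HBaseTestingUtil exposes the same startMiniDFSCluster/shutdownMiniDFSCluster methods as the long-standing HBaseTestingUtility API (class and variable names below are illustrative, not taken from this log):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniDfsStartupSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Ask for two DataNodes, matching the data directories under
        // .../cluster_904a987f-.../data/ seen in the surrounding entries.
        MiniDFSCluster dfs = util.startMiniDFSCluster(2);
        System.out.println("Mini HDFS filesystem at " + dfs.getFileSystem().getUri());
        util.shutdownMiniDFSCluster();
      }
    }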
2024-11-21T11:29:57,451 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data3/current/BP-677158924-172.17.0.2-1732188596698/current, will proceed with Du for space computation calculation, 2024-11-21T11:29:57,451 WARN [Thread-685 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data4/current/BP-677158924-172.17.0.2-1732188596698/current, will proceed with Du for space computation calculation, 2024-11-21T11:29:57,477 WARN [Thread-673 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T11:29:57,481 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeb4c2074ef24bf4b with lease ID 0x2edf52267c6427b4: Processing first storage report for DS-302696ae-c9bb-4355-adf8-8397d225a3aa from datanode DatanodeRegistration(127.0.0.1:39055, datanodeUuid=ae719419-eba9-47fe-a9d9-6df1c759590a, infoPort=46191, infoSecurePort=0, ipcPort=43387, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698) 2024-11-21T11:29:57,481 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeb4c2074ef24bf4b with lease ID 0x2edf52267c6427b4: from storage DS-302696ae-c9bb-4355-adf8-8397d225a3aa node DatanodeRegistration(127.0.0.1:39055, datanodeUuid=ae719419-eba9-47fe-a9d9-6df1c759590a, infoPort=46191, infoSecurePort=0, ipcPort=43387, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:29:57,481 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeb4c2074ef24bf4b with lease ID 0x2edf52267c6427b4: Processing first storage report for DS-2094c6fb-1734-43c9-8fe7-0b2b7b482b54 from datanode DatanodeRegistration(127.0.0.1:39055, datanodeUuid=ae719419-eba9-47fe-a9d9-6df1c759590a, infoPort=46191, infoSecurePort=0, ipcPort=43387, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698) 2024-11-21T11:29:57,481 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeb4c2074ef24bf4b with lease ID 0x2edf52267c6427b4: from storage DS-2094c6fb-1734-43c9-8fe7-0b2b7b482b54 node DatanodeRegistration(127.0.0.1:39055, datanodeUuid=ae719419-eba9-47fe-a9d9-6df1c759590a, infoPort=46191, infoSecurePort=0, ipcPort=43387, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:29:57,580 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056 2024-11-21T11:29:57,583 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/zookeeper_0, clientPort=52013, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-21T11:29:57,584 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52013 2024-11-21T11:29:57,585 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:29:57,586 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:29:57,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39055 is added to blk_1073741825_1001 (size=7) 2024-11-21T11:29:57,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741825_1001 (size=7) 2024-11-21T11:29:57,598 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647 with version=8 2024-11-21T11:29:57,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/hbase-staging 2024-11-21T11:29:57,601 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7b462513bfc2:0 server-side Connection retries=45 2024-11-21T11:29:57,602 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T11:29:57,602 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T11:29:57,602 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T11:29:57,602 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T11:29:57,602 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T11:29:57,602 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T11:29:57,602 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T11:29:57,603 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36257 2024-11-21T11:29:57,605 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36257 connecting to ZooKeeper ensemble=127.0.0.1:52013 2024-11-21T11:29:57,613 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:362570x0, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T11:29:57,614 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36257-0x1013a4a57de0000 connected 2024-11-21T11:29:57,632 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:29:57,635 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:29:57,637 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:29:57,638 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647, hbase.cluster.distributed=false 2024-11-21T11:29:57,640 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-21T11:29:57,642 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36257 2024-11-21T11:29:57,644 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36257 2024-11-21T11:29:57,644 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36257 2024-11-21T11:29:57,645 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36257 2024-11-21T11:29:57,646 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36257 2024-11-21T11:29:57,663 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7b462513bfc2:0 server-side Connection retries=45 2024-11-21T11:29:57,663 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T11:29:57,663 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T11:29:57,663 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T11:29:57,663 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T11:29:57,663 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T11:29:57,663 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T11:29:57,663 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T11:29:57,664 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40563 2024-11-21T11:29:57,665 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40563 connecting to ZooKeeper ensemble=127.0.0.1:52013 2024-11-21T11:29:57,666 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:29:57,668 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:29:57,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:405630x0, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T11:29:57,673 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40563-0x1013a4a57de0001 connected 2024-11-21T11:29:57,673 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40563-0x1013a4a57de0001, quorum=127.0.0.1:52013, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:29:57,673 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T11:29:57,674 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T11:29:57,675 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40563-0x1013a4a57de0001, quorum=127.0.0.1:52013, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-21T11:29:57,676 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40563-0x1013a4a57de0001, quorum=127.0.0.1:52013, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-21T11:29:57,676 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40563 2024-11-21T11:29:57,677 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40563 2024-11-21T11:29:57,677 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40563 2024-11-21T11:29:57,677 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40563 2024-11-21T11:29:57,678 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40563 
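The entries above record the single-process mini cluster coming up: a MiniZooKeeperCluster on client port 52013, an HMaster RPC server bound to 172.17.0.2:36257, and a RegionServer bound to 172.17.0.2:40563, each with its RPC executors started. A hedged sketch of the usual test-side driver for this sequence, assuming HBaseTestingUtil keeps the familiar startMiniCluster/createTable/shutdownMiniCluster methods; the table, family, and class names are illustrative:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MiniClusterSmokeSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Starts ZK, HDFS, one master and one regionserver, producing startup logs like those above.
        util.startMiniCluster();
        try (Table table = util.createTable(TableName.valueOf("smoke"), Bytes.toBytes("f"))) {
          table.put(new Put(Bytes.toBytes("r1"))
              .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }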
2024-11-21T11:29:57,692 DEBUG [M:0;7b462513bfc2:36257 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7b462513bfc2:36257 2024-11-21T11:29:57,692 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7b462513bfc2,36257,1732188597601 2024-11-21T11:29:57,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T11:29:57,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40563-0x1013a4a57de0001, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T11:29:57,695 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7b462513bfc2,36257,1732188597601 2024-11-21T11:29:57,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:57,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40563-0x1013a4a57de0001, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-21T11:29:57,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40563-0x1013a4a57de0001, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:57,701 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-21T11:29:57,713 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7b462513bfc2,36257,1732188597601 from backup master directory 2024-11-21T11:29:57,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7b462513bfc2,36257,1732188597601 2024-11-21T11:29:57,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T11:29:57,715 WARN [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-21T11:29:57,715 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7b462513bfc2,36257,1732188597601 2024-11-21T11:29:57,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40563-0x1013a4a57de0001, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T11:29:57,722 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/hbase.id] with ID: c16bed1d-0b23-4081-bb4d-70a5e054b5c7 2024-11-21T11:29:57,722 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/.tmp/hbase.id 2024-11-21T11:29:57,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39055 is added to blk_1073741826_1002 (size=42) 2024-11-21T11:29:57,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741826_1002 (size=42) 2024-11-21T11:29:57,730 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/.tmp/hbase.id]:[hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/hbase.id] 2024-11-21T11:29:57,745 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:29:57,746 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T11:29:57,747 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-21T11:29:57,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40563-0x1013a4a57de0001, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:57,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:57,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741827_1003 (size=196) 2024-11-21T11:29:57,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39055 is added to blk_1073741827_1003 (size=196) 2024-11-21T11:29:57,758 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T11:29:57,759 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T11:29:57,759 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-21T11:29:57,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741828_1004 (size=1189) 2024-11-21T11:29:57,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39055 is added to blk_1073741828_1004 (size=1189) 2024-11-21T11:29:57,768 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/data/master/store 2024-11-21T11:29:57,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39055 is added to blk_1073741829_1005 (size=34) 2024-11-21T11:29:57,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741829_1005 (size=34) 2024-11-21T11:29:57,775 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:29:57,776 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T11:29:57,776 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:29:57,776 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:29:57,776 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T11:29:57,776 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:29:57,776 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-21T11:29:57,776 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732188597776Disabling compacts and flushes for region at 1732188597776Disabling writes for close at 1732188597776Writing region close event to WAL at 1732188597776Closed at 1732188597776 2024-11-21T11:29:57,777 WARN [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/data/master/store/.initializing 2024-11-21T11:29:57,777 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/WALs/7b462513bfc2,36257,1732188597601 2024-11-21T11:29:57,779 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b462513bfc2%2C36257%2C1732188597601, suffix=, logDir=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/WALs/7b462513bfc2,36257,1732188597601, archiveDir=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/oldWALs, maxLogs=10 2024-11-21T11:29:57,780 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C36257%2C1732188597601.1732188597780 2024-11-21T11:29:57,786 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/WALs/7b462513bfc2,36257,1732188597601/7b462513bfc2%2C36257%2C1732188597601.1732188597780 2024-11-21T11:29:57,787 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34243:34243),(127.0.0.1/127.0.0.1:46191:46191)] 2024-11-21T11:29:57,787 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T11:29:57,787 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:29:57,788 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:29:57,788 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:29:57,789 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:29:57,790 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T11:29:57,791 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:57,791 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:29:57,791 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:29:57,792 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-21T11:29:57,793 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:57,793 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:29:57,793 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:29:57,794 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T11:29:57,795 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:57,795 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:29:57,795 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:29:57,797 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T11:29:57,797 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:57,797 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:29:57,797 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:29:57,798 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:29:57,799 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:29:57,800 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:29:57,800 DEBUG [master/7b462513bfc2:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:29:57,801 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T11:29:57,802 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:29:57,805 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T11:29:57,805 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=880660, jitterRate=0.11981740593910217}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T11:29:57,807 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732188597788Initializing all the Stores at 1732188597789 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188597789Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188597789Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188597789Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188597789Cleaning up temporary data from old regions at 1732188597800 (+11 ms)Region opened successfully at 1732188597807 (+7 ms) 2024-11-21T11:29:57,807 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T11:29:57,811 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ec667af, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7b462513bfc2/172.17.0.2:0 2024-11-21T11:29:57,812 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-21T11:29:57,813 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T11:29:57,813 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T11:29:57,813 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T11:29:57,814 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-21T11:29:57,814 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T11:29:57,814 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T11:29:57,817 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-21T11:29:57,818 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-21T11:29:57,819 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-21T11:29:57,820 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T11:29:57,820 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-21T11:29:57,821 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-21T11:29:57,822 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T11:29:57,823 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-21T11:29:57,825 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-21T11:29:57,826 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-21T11:29:57,827 DEBUG 
[master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-21T11:29:57,828 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T11:29:57,829 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-21T11:29:57,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40563-0x1013a4a57de0001, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-21T11:29:57,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-21T11:29:57,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:57,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40563-0x1013a4a57de0001, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:57,832 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7b462513bfc2,36257,1732188597601, sessionid=0x1013a4a57de0000, setting cluster-up flag (Was=false) 2024-11-21T11:29:57,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40563-0x1013a4a57de0001, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:57,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:57,842 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-21T11:29:57,843 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7b462513bfc2,36257,1732188597601 2024-11-21T11:29:57,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:57,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40563-0x1013a4a57de0001, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:57,852 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-21T11:29:57,853 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7b462513bfc2,36257,1732188597601 2024-11-21T11:29:57,854 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T11:29:57,856 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T11:29:57,856 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T11:29:57,856 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-21T11:29:57,857 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7b462513bfc2,36257,1732188597601 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T11:29:57,858 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:29:57,858 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:29:57,858 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:29:57,858 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:29:57,858 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7b462513bfc2:0, corePoolSize=10, maxPoolSize=10 2024-11-21T11:29:57,858 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:57,858 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7b462513bfc2:0, corePoolSize=2, maxPoolSize=2 2024-11-21T11:29:57,858 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7b462513bfc2:0, corePoolSize=1, 
maxPoolSize=1 2024-11-21T11:29:57,859 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732188627859 2024-11-21T11:29:57,859 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T11:29:57,859 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T11:29:57,859 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T11:29:57,859 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T11:29:57,859 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T11:29:57,859 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T11:29:57,859 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:57,860 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T11:29:57,860 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T11:29:57,860 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T11:29:57,860 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T11:29:57,860 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T11:29:57,860 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T11:29:57,860 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T11:29:57,861 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.large.0-1732188597861,5,FailOnTimeoutGroup] 2024-11-21T11:29:57,861 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.small.0-1732188597861,5,FailOnTimeoutGroup] 2024-11-21T11:29:57,861 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:57,861 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T11:29:57,861 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:57,861 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:57,861 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:57,862 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T11:29:57,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741831_1007 (size=1321) 2024-11-21T11:29:57,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39055 is added to blk_1073741831_1007 (size=1321) 2024-11-21T11:29:57,873 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T11:29:57,873 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647 2024-11-21T11:29:57,880 INFO [RS:0;7b462513bfc2:40563 {}] regionserver.HRegionServer(746): ClusterId : c16bed1d-0b23-4081-bb4d-70a5e054b5c7 2024-11-21T11:29:57,880 DEBUG [RS:0;7b462513bfc2:40563 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T11:29:57,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741832_1008 (size=32) 2024-11-21T11:29:57,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39055 is added to blk_1073741832_1008 (size=32) 2024-11-21T11:29:57,881 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:29:57,882 DEBUG [RS:0;7b462513bfc2:40563 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T11:29:57,882 DEBUG [RS:0;7b462513bfc2:40563 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T11:29:57,883 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T11:29:57,884 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T11:29:57,884 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:57,885 DEBUG [RS:0;7b462513bfc2:40563 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T11:29:57,885 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:29:57,885 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T11:29:57,885 DEBUG [RS:0;7b462513bfc2:40563 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2cd2e80e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7b462513bfc2/172.17.0.2:0 2024-11-21T11:29:57,886 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T11:29:57,886 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:57,887 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:29:57,887 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T11:29:57,888 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T11:29:57,889 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:57,889 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:29:57,889 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T11:29:57,890 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T11:29:57,890 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:57,891 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:29:57,891 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T11:29:57,892 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/hbase/meta/1588230740 2024-11-21T11:29:57,893 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/hbase/meta/1588230740 2024-11-21T11:29:57,895 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T11:29:57,895 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T11:29:57,896 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-21T11:29:57,897 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T11:29:57,902 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T11:29:57,903 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=807378, jitterRate=0.02663518488407135}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-21T11:29:57,905 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732188597881Initializing all the Stores at 1732188597882 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188597882Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188597883 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188597883Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188597883Cleaning up temporary data from old regions at 1732188597895 (+12 ms)Region opened successfully at 1732188597905 (+10 ms) 2024-11-21T11:29:57,905 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T11:29:57,905 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T11:29:57,905 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T11:29:57,905 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T11:29:57,905 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T11:29:57,907 DEBUG [RS:0;7b462513bfc2:40563 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7b462513bfc2:40563 2024-11-21T11:29:57,907 INFO [RS:0;7b462513bfc2:40563 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T11:29:57,907 INFO [RS:0;7b462513bfc2:40563 {}] 
regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T11:29:57,907 DEBUG [RS:0;7b462513bfc2:40563 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-21T11:29:57,907 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T11:29:57,907 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732188597905Disabling compacts and flushes for region at 1732188597905Disabling writes for close at 1732188597905Writing region close event to WAL at 1732188597907 (+2 ms)Closed at 1732188597907 2024-11-21T11:29:57,908 INFO [RS:0;7b462513bfc2:40563 {}] regionserver.HRegionServer(2659): reportForDuty to master=7b462513bfc2,36257,1732188597601 with port=40563, startcode=1732188597662 2024-11-21T11:29:57,908 DEBUG [RS:0;7b462513bfc2:40563 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T11:29:57,910 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T11:29:57,910 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T11:29:57,910 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T11:29:57,910 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34853, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T11:29:57,911 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36257 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7b462513bfc2,40563,1732188597662 2024-11-21T11:29:57,911 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36257 {}] master.ServerManager(517): Registering regionserver=7b462513bfc2,40563,1732188597662 2024-11-21T11:29:57,912 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T11:29:57,913 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T11:29:57,914 DEBUG [RS:0;7b462513bfc2:40563 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647 2024-11-21T11:29:57,914 DEBUG [RS:0;7b462513bfc2:40563 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37309 2024-11-21T11:29:57,914 DEBUG [RS:0;7b462513bfc2:40563 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T11:29:57,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-21T11:29:57,917 DEBUG 
[RS:0;7b462513bfc2:40563 {}] zookeeper.ZKUtil(111): regionserver:40563-0x1013a4a57de0001, quorum=127.0.0.1:52013, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7b462513bfc2,40563,1732188597662 2024-11-21T11:29:57,917 WARN [RS:0;7b462513bfc2:40563 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T11:29:57,917 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7b462513bfc2,40563,1732188597662] 2024-11-21T11:29:57,917 INFO [RS:0;7b462513bfc2:40563 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-21T11:29:57,917 DEBUG [RS:0;7b462513bfc2:40563 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662 2024-11-21T11:29:57,922 INFO [RS:0;7b462513bfc2:40563 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T11:29:57,926 INFO [RS:0;7b462513bfc2:40563 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T11:29:57,926 INFO [RS:0;7b462513bfc2:40563 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T11:29:57,926 INFO [RS:0;7b462513bfc2:40563 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:57,926 INFO [RS:0;7b462513bfc2:40563 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T11:29:57,927 INFO [RS:0;7b462513bfc2:40563 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T11:29:57,927 INFO [RS:0;7b462513bfc2:40563 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-21T11:29:57,928 DEBUG [RS:0;7b462513bfc2:40563 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:57,928 DEBUG [RS:0;7b462513bfc2:40563 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:57,928 DEBUG [RS:0;7b462513bfc2:40563 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:57,928 DEBUG [RS:0;7b462513bfc2:40563 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:57,928 DEBUG [RS:0;7b462513bfc2:40563 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:57,928 DEBUG [RS:0;7b462513bfc2:40563 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7b462513bfc2:0, corePoolSize=2, maxPoolSize=2 2024-11-21T11:29:57,928 DEBUG [RS:0;7b462513bfc2:40563 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:57,928 DEBUG [RS:0;7b462513bfc2:40563 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:57,928 DEBUG [RS:0;7b462513bfc2:40563 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:57,928 DEBUG [RS:0;7b462513bfc2:40563 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:57,928 DEBUG [RS:0;7b462513bfc2:40563 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:57,928 DEBUG [RS:0;7b462513bfc2:40563 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:57,928 DEBUG [RS:0;7b462513bfc2:40563 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7b462513bfc2:0, corePoolSize=3, maxPoolSize=3 2024-11-21T11:29:57,928 DEBUG [RS:0;7b462513bfc2:40563 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0, corePoolSize=3, maxPoolSize=3 2024-11-21T11:29:57,929 INFO [RS:0;7b462513bfc2:40563 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:57,929 INFO [RS:0;7b462513bfc2:40563 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:57,929 INFO [RS:0;7b462513bfc2:40563 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:57,929 INFO [RS:0;7b462513bfc2:40563 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-21T11:29:57,929 INFO [RS:0;7b462513bfc2:40563 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:57,929 INFO [RS:0;7b462513bfc2:40563 {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,40563,1732188597662-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T11:29:57,946 INFO [RS:0;7b462513bfc2:40563 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T11:29:57,946 INFO [RS:0;7b462513bfc2:40563 {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,40563,1732188597662-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:57,946 INFO [RS:0;7b462513bfc2:40563 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:57,946 INFO [RS:0;7b462513bfc2:40563 {}] regionserver.Replication(171): 7b462513bfc2,40563,1732188597662 started 2024-11-21T11:29:57,962 INFO [RS:0;7b462513bfc2:40563 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:57,962 INFO [RS:0;7b462513bfc2:40563 {}] regionserver.HRegionServer(1482): Serving as 7b462513bfc2,40563,1732188597662, RpcServer on 7b462513bfc2/172.17.0.2:40563, sessionid=0x1013a4a57de0001 2024-11-21T11:29:57,962 DEBUG [RS:0;7b462513bfc2:40563 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T11:29:57,962 DEBUG [RS:0;7b462513bfc2:40563 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7b462513bfc2,40563,1732188597662 2024-11-21T11:29:57,962 DEBUG [RS:0;7b462513bfc2:40563 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7b462513bfc2,40563,1732188597662' 2024-11-21T11:29:57,962 DEBUG [RS:0;7b462513bfc2:40563 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-21T11:29:57,963 DEBUG [RS:0;7b462513bfc2:40563 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-21T11:29:57,963 DEBUG [RS:0;7b462513bfc2:40563 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T11:29:57,963 DEBUG [RS:0;7b462513bfc2:40563 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T11:29:57,963 DEBUG [RS:0;7b462513bfc2:40563 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7b462513bfc2,40563,1732188597662 2024-11-21T11:29:57,963 DEBUG [RS:0;7b462513bfc2:40563 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7b462513bfc2,40563,1732188597662' 2024-11-21T11:29:57,963 DEBUG [RS:0;7b462513bfc2:40563 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-21T11:29:57,964 DEBUG [RS:0;7b462513bfc2:40563 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-21T11:29:57,964 DEBUG [RS:0;7b462513bfc2:40563 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T11:29:57,965 INFO [RS:0;7b462513bfc2:40563 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T11:29:57,965 INFO [RS:0;7b462513bfc2:40563 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-21T11:29:58,064 WARN [7b462513bfc2:36257 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-21T11:29:58,067 INFO [RS:0;7b462513bfc2:40563 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b462513bfc2%2C40563%2C1732188597662, suffix=, logDir=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662, archiveDir=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/oldWALs, maxLogs=32 2024-11-21T11:29:58,069 INFO [RS:0;7b462513bfc2:40563 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C40563%2C1732188597662.1732188598068 2024-11-21T11:29:58,076 INFO [RS:0;7b462513bfc2:40563 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188598068 2024-11-21T11:29:58,077 DEBUG [RS:0;7b462513bfc2:40563 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34243:34243),(127.0.0.1/127.0.0.1:46191:46191)] 2024-11-21T11:29:58,293 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T11:29:58,294 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-21T11:29:58,295 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-21T11:29:58,314 DEBUG [7b462513bfc2:36257 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T11:29:58,315 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7b462513bfc2,40563,1732188597662 2024-11-21T11:29:58,316 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7b462513bfc2,40563,1732188597662, state=OPENING 2024-11-21T11:29:58,318 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T11:29:58,321 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40563-0x1013a4a57de0001, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:58,321 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:29:58,321 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T11:29:58,322 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=7b462513bfc2,40563,1732188597662}] 2024-11-21T11:29:58,322 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:29:58,322 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:29:58,344 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:29:58,351 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:29:58,476 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T11:29:58,478 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36351, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T11:29:58,483 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T11:29:58,483 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-21T11:29:58,485 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b462513bfc2%2C40563%2C1732188597662.meta, suffix=.meta, logDir=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662, archiveDir=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/oldWALs, maxLogs=32 2024-11-21T11:29:58,486 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta 2024-11-21T11:29:58,492 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta 2024-11-21T11:29:58,496 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46191:46191),(127.0.0.1/127.0.0.1:34243:34243)] 2024-11-21T11:29:58,497 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T11:29:58,498 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T11:29:58,498 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 
2024-11-21T11:29:58,498 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-21T11:29:58,498 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T11:29:58,498 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:29:58,498 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T11:29:58,498 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T11:29:58,500 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T11:29:58,502 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T11:29:58,502 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:58,502 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:29:58,503 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T11:29:58,504 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T11:29:58,504 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:58,504 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:29:58,504 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T11:29:58,505 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T11:29:58,506 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:58,506 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:29:58,506 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T11:29:58,507 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T11:29:58,507 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:58,508 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:29:58,508 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T11:29:58,509 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/hbase/meta/1588230740 2024-11-21T11:29:58,510 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/hbase/meta/1588230740 2024-11-21T11:29:58,511 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T11:29:58,511 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T11:29:58,512 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-21T11:29:58,513 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T11:29:58,514 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=712632, jitterRate=-0.09384280443191528}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-21T11:29:58,514 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T11:29:58,515 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732188598498Writing region info on filesystem at 1732188598498Initializing all the Stores at 1732188598499 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188598499Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B 
(8KB)'} at 1732188598500 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188598500Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188598500Cleaning up temporary data from old regions at 1732188598511 (+11 ms)Running coprocessor post-open hooks at 1732188598514 (+3 ms)Region opened successfully at 1732188598515 (+1 ms) 2024-11-21T11:29:58,517 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732188598476 2024-11-21T11:29:58,520 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T11:29:58,520 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T11:29:58,521 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7b462513bfc2,40563,1732188597662 2024-11-21T11:29:58,523 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7b462513bfc2,40563,1732188597662, state=OPEN 2024-11-21T11:29:58,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-21T11:29:58,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40563-0x1013a4a57de0001, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-21T11:29:58,532 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7b462513bfc2,40563,1732188597662 2024-11-21T11:29:58,532 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:29:58,532 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:29:58,536 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T11:29:58,536 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7b462513bfc2,40563,1732188597662 in 211 msec 2024-11-21T11:29:58,540 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished 
subprocedure pid=2, resume processing ppid=1 2024-11-21T11:29:58,540 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 626 msec 2024-11-21T11:29:58,541 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T11:29:58,541 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T11:29:58,543 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T11:29:58,543 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7b462513bfc2,40563,1732188597662, seqNum=-1] 2024-11-21T11:29:58,543 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T11:29:58,545 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50941, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T11:29:58,552 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 695 msec 2024-11-21T11:29:58,552 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732188598552, completionTime=-1 2024-11-21T11:29:58,552 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T11:29:58,552 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-21T11:29:58,555 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-21T11:29:58,555 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732188658555 2024-11-21T11:29:58,555 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732188718555 2024-11-21T11:29:58,555 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-21T11:29:58,555 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,36257,1732188597601-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:58,555 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,36257,1732188597601-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-21T11:29:58,555 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,36257,1732188597601-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:58,555 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7b462513bfc2:36257, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:58,555 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:58,555 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:58,557 DEBUG [master/7b462513bfc2:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T11:29:58,560 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.845sec 2024-11-21T11:29:58,560 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T11:29:58,560 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T11:29:58,560 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T11:29:58,560 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-21T11:29:58,560 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T11:29:58,560 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,36257,1732188597601-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T11:29:58,560 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,36257,1732188597601-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T11:29:58,563 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T11:29:58,563 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T11:29:58,563 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,36257,1732188597601-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-21T11:29:58,580 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cd7b3e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T11:29:58,580 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7b462513bfc2,36257,-1 for getting cluster id 2024-11-21T11:29:58,580 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T11:29:58,582 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c16bed1d-0b23-4081-bb4d-70a5e054b5c7' 2024-11-21T11:29:58,583 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T11:29:58,583 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c16bed1d-0b23-4081-bb4d-70a5e054b5c7" 2024-11-21T11:29:58,583 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@121e2525, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T11:29:58,583 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7b462513bfc2,36257,-1] 2024-11-21T11:29:58,584 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T11:29:58,584 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:29:58,586 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59606, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T11:29:58,587 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@abbe752, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T11:29:58,588 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T11:29:58,590 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7b462513bfc2,40563,1732188597662, seqNum=-1] 2024-11-21T11:29:58,590 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T11:29:58,592 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58346, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T11:29:58,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7b462513bfc2,36257,1732188597601 2024-11-21T11:29:58,594 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:29:58,598 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-21T11:29:58,648 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7b462513bfc2:0 server-side Connection retries=45 2024-11-21T11:29:58,648 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T11:29:58,648 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T11:29:58,649 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T11:29:58,649 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T11:29:58,649 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T11:29:58,649 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T11:29:58,649 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T11:29:58,650 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39231 2024-11-21T11:29:58,651 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39231 connecting to ZooKeeper ensemble=127.0.0.1:52013 2024-11-21T11:29:58,652 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:29:58,653 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:29:58,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:392310x0, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T11:29:58,658 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:39231-0x1013a4a57de0002, quorum=127.0.0.1:52013, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-21T11:29:58,658 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39231-0x1013a4a57de0002 connected 2024-11-21T11:29:58,658 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-21T11:29:58,658 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T11:29:58,659 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
2024-11-21T11:29:58,659 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:39231-0x1013a4a57de0002, quorum=127.0.0.1:52013, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-21T11:29:58,661 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39231-0x1013a4a57de0002, quorum=127.0.0.1:52013, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-21T11:29:58,664 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39231 2024-11-21T11:29:58,664 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39231 2024-11-21T11:29:58,664 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39231 2024-11-21T11:29:58,667 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39231 2024-11-21T11:29:58,667 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39231 2024-11-21T11:29:58,668 INFO [RS:1;7b462513bfc2:39231 {}] regionserver.HRegionServer(746): ClusterId : c16bed1d-0b23-4081-bb4d-70a5e054b5c7 2024-11-21T11:29:58,669 DEBUG [RS:1;7b462513bfc2:39231 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T11:29:58,672 DEBUG [RS:1;7b462513bfc2:39231 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T11:29:58,672 DEBUG [RS:1;7b462513bfc2:39231 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T11:29:58,674 DEBUG [RS:1;7b462513bfc2:39231 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T11:29:58,675 DEBUG [RS:1;7b462513bfc2:39231 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c8abfd6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7b462513bfc2/172.17.0.2:0 2024-11-21T11:29:58,693 DEBUG [RS:1;7b462513bfc2:39231 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;7b462513bfc2:39231 2024-11-21T11:29:58,694 INFO [RS:1;7b462513bfc2:39231 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T11:29:58,694 INFO [RS:1;7b462513bfc2:39231 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T11:29:58,694 DEBUG [RS:1;7b462513bfc2:39231 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-21T11:29:58,694 INFO [RS:1;7b462513bfc2:39231 {}] regionserver.HRegionServer(2659): reportForDuty to master=7b462513bfc2,36257,1732188597601 with port=39231, startcode=1732188598648 2024-11-21T11:29:58,695 DEBUG [RS:1;7b462513bfc2:39231 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T11:29:58,697 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34625, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T11:29:58,697 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36257 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7b462513bfc2,39231,1732188598648 2024-11-21T11:29:58,697 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36257 {}] master.ServerManager(517): Registering regionserver=7b462513bfc2,39231,1732188598648 2024-11-21T11:29:58,699 DEBUG [RS:1;7b462513bfc2:39231 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647 2024-11-21T11:29:58,699 DEBUG [RS:1;7b462513bfc2:39231 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37309 2024-11-21T11:29:58,699 DEBUG [RS:1;7b462513bfc2:39231 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T11:29:58,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-21T11:29:58,701 DEBUG [RS:1;7b462513bfc2:39231 {}] zookeeper.ZKUtil(111): regionserver:39231-0x1013a4a57de0002, quorum=127.0.0.1:52013, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7b462513bfc2,39231,1732188598648 2024-11-21T11:29:58,702 WARN [RS:1;7b462513bfc2:39231 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T11:29:58,702 INFO [RS:1;7b462513bfc2:39231 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-21T11:29:58,702 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7b462513bfc2,39231,1732188598648] 2024-11-21T11:29:58,702 DEBUG [RS:1;7b462513bfc2:39231 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648 2024-11-21T11:29:58,716 INFO [RS:1;7b462513bfc2:39231 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T11:29:58,722 INFO [RS:1;7b462513bfc2:39231 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T11:29:58,722 INFO [RS:1;7b462513bfc2:39231 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T11:29:58,722 INFO [RS:1;7b462513bfc2:39231 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-21T11:29:58,723 INFO [RS:1;7b462513bfc2:39231 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T11:29:58,724 INFO [RS:1;7b462513bfc2:39231 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T11:29:58,724 INFO [RS:1;7b462513bfc2:39231 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:58,724 DEBUG [RS:1;7b462513bfc2:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:58,724 DEBUG [RS:1;7b462513bfc2:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:58,724 DEBUG [RS:1;7b462513bfc2:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:58,724 DEBUG [RS:1;7b462513bfc2:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:58,724 DEBUG [RS:1;7b462513bfc2:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:58,724 DEBUG [RS:1;7b462513bfc2:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7b462513bfc2:0, corePoolSize=2, maxPoolSize=2 2024-11-21T11:29:58,724 DEBUG [RS:1;7b462513bfc2:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:58,725 DEBUG [RS:1;7b462513bfc2:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:58,725 DEBUG [RS:1;7b462513bfc2:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:58,725 DEBUG [RS:1;7b462513bfc2:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:58,725 DEBUG [RS:1;7b462513bfc2:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:58,725 DEBUG [RS:1;7b462513bfc2:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:29:58,725 DEBUG [RS:1;7b462513bfc2:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7b462513bfc2:0, corePoolSize=3, maxPoolSize=3 2024-11-21T11:29:58,725 DEBUG [RS:1;7b462513bfc2:39231 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0, corePoolSize=3, maxPoolSize=3 2024-11-21T11:29:58,730 INFO [RS:1;7b462513bfc2:39231 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-21T11:29:58,730 INFO [RS:1;7b462513bfc2:39231 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:58,730 INFO [RS:1;7b462513bfc2:39231 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:58,730 INFO [RS:1;7b462513bfc2:39231 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:58,730 INFO [RS:1;7b462513bfc2:39231 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:58,730 INFO [RS:1;7b462513bfc2:39231 {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,39231,1732188598648-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T11:29:58,753 INFO [RS:1;7b462513bfc2:39231 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T11:29:58,753 INFO [RS:1;7b462513bfc2:39231 {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,39231,1732188598648-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:58,753 INFO [RS:1;7b462513bfc2:39231 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:58,754 INFO [RS:1;7b462513bfc2:39231 {}] regionserver.Replication(171): 7b462513bfc2,39231,1732188598648 started 2024-11-21T11:29:58,772 INFO [RS:1;7b462513bfc2:39231 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:29:58,772 INFO [RS:1;7b462513bfc2:39231 {}] regionserver.HRegionServer(1482): Serving as 7b462513bfc2,39231,1732188598648, RpcServer on 7b462513bfc2/172.17.0.2:39231, sessionid=0x1013a4a57de0002 2024-11-21T11:29:58,772 DEBUG [RS:1;7b462513bfc2:39231 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T11:29:58,772 DEBUG [RS:1;7b462513bfc2:39231 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7b462513bfc2,39231,1732188598648 2024-11-21T11:29:58,772 DEBUG [RS:1;7b462513bfc2:39231 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7b462513bfc2,39231,1732188598648' 2024-11-21T11:29:58,772 DEBUG [RS:1;7b462513bfc2:39231 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-21T11:29:58,772 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;7b462513bfc2:39231,5,FailOnTimeoutGroup] 2024-11-21T11:29:58,773 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-21T11:29:58,773 DEBUG [RS:1;7b462513bfc2:39231 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-21T11:29:58,773 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T11:29:58,773 DEBUG [RS:1;7b462513bfc2:39231 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T11:29:58,773 DEBUG [RS:1;7b462513bfc2:39231 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T11:29:58,773 DEBUG [RS:1;7b462513bfc2:39231 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
7b462513bfc2,39231,1732188598648 2024-11-21T11:29:58,773 DEBUG [RS:1;7b462513bfc2:39231 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7b462513bfc2,39231,1732188598648' 2024-11-21T11:29:58,773 DEBUG [RS:1;7b462513bfc2:39231 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-21T11:29:58,774 DEBUG [RS:1;7b462513bfc2:39231 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-21T11:29:58,774 DEBUG [RS:1;7b462513bfc2:39231 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T11:29:58,774 INFO [RS:1;7b462513bfc2:39231 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T11:29:58,774 INFO [RS:1;7b462513bfc2:39231 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T11:29:58,774 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 7b462513bfc2,36257,1732188597601 2024-11-21T11:29:58,774 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@65e2e1e0 2024-11-21T11:29:58,775 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T11:29:58,777 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59610, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T11:29:58,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36257 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-21T11:29:58,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36257 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-21T11:29:58,778 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36257 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T11:29:58,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36257 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-21T11:29:58,781 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T11:29:58,781 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:58,781 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36257 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-21T11:29:58,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36257 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T11:29:58,782 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T11:29:58,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39055 is added to blk_1073741835_1011 (size=393) 2024-11-21T11:29:58,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741835_1011 (size=393) 2024-11-21T11:29:58,791 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5c2371c80dbf35b182839e38745bf682, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647 2024-11-21T11:29:58,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44593 is added to blk_1073741836_1012 (size=76) 2024-11-21T11:29:58,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39055 is added to blk_1073741836_1012 (size=76) 2024-11-21T11:29:58,799 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:29:58,799 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 5c2371c80dbf35b182839e38745bf682, disabling compactions & flushes 2024-11-21T11:29:58,799 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682. 2024-11-21T11:29:58,799 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682. 2024-11-21T11:29:58,799 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682. after waiting 0 ms 2024-11-21T11:29:58,799 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682. 2024-11-21T11:29:58,799 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682. 2024-11-21T11:29:58,799 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5c2371c80dbf35b182839e38745bf682: Waiting for close lock at 1732188598799Disabling compacts and flushes for region at 1732188598799Disabling writes for close at 1732188598799Writing region close event to WAL at 1732188598799Closed at 1732188598799 2024-11-21T11:29:58,801 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T11:29:58,801 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732188598801"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732188598801"}]},"ts":"1732188598801"} 2024-11-21T11:29:58,804 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-21T11:29:58,806 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T11:29:58,806 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732188598806"}]},"ts":"1732188598806"} 2024-11-21T11:29:58,809 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-21T11:29:58,809 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=5c2371c80dbf35b182839e38745bf682, ASSIGN}] 2024-11-21T11:29:58,811 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=5c2371c80dbf35b182839e38745bf682, ASSIGN 2024-11-21T11:29:58,812 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=5c2371c80dbf35b182839e38745bf682, ASSIGN; state=OFFLINE, location=7b462513bfc2,40563,1732188597662; forceNewPlan=false, retain=false 2024-11-21T11:29:58,870 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T11:29:58,872 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:29:58,877 INFO [RS:1;7b462513bfc2:39231 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b462513bfc2%2C39231%2C1732188598648, suffix=, logDir=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648, archiveDir=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/oldWALs, maxLogs=32 2024-11-21T11:29:58,878 INFO [RS:1;7b462513bfc2:39231 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C39231%2C1732188598648.1732188598878 2024-11-21T11:29:58,901 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:29:58,905 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:29:58,906 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:29:58,915 INFO [RS:1;7b462513bfc2:39231 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 2024-11-21T11:29:58,916 DEBUG [RS:1;7b462513bfc2:39231 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46191:46191),(127.0.0.1/127.0.0.1:34243:34243)] 2024-11-21T11:29:58,963 INFO [7b462513bfc2:36257 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-21T11:29:58,963 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5c2371c80dbf35b182839e38745bf682, regionState=OPENING, regionLocation=7b462513bfc2,40563,1732188597662 2024-11-21T11:29:58,967 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=5c2371c80dbf35b182839e38745bf682, ASSIGN because future has completed 2024-11-21T11:29:58,967 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5c2371c80dbf35b182839e38745bf682, server=7b462513bfc2,40563,1732188597662}] 2024-11-21T11:29:59,126 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682. 2024-11-21T11:29:59,127 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 5c2371c80dbf35b182839e38745bf682, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T11:29:59,127 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 5c2371c80dbf35b182839e38745bf682 2024-11-21T11:29:59,127 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:29:59,128 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 5c2371c80dbf35b182839e38745bf682 2024-11-21T11:29:59,128 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 5c2371c80dbf35b182839e38745bf682 2024-11-21T11:29:59,132 INFO [StoreOpener-5c2371c80dbf35b182839e38745bf682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 5c2371c80dbf35b182839e38745bf682 2024-11-21T11:29:59,134 INFO 
[StoreOpener-5c2371c80dbf35b182839e38745bf682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5c2371c80dbf35b182839e38745bf682 columnFamilyName info 2024-11-21T11:29:59,134 DEBUG [StoreOpener-5c2371c80dbf35b182839e38745bf682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:29:59,135 INFO [StoreOpener-5c2371c80dbf35b182839e38745bf682-1 {}] regionserver.HStore(327): Store=5c2371c80dbf35b182839e38745bf682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:29:59,135 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 5c2371c80dbf35b182839e38745bf682 2024-11-21T11:29:59,136 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682 2024-11-21T11:29:59,137 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682 2024-11-21T11:29:59,137 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 5c2371c80dbf35b182839e38745bf682 2024-11-21T11:29:59,137 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 5c2371c80dbf35b182839e38745bf682 2024-11-21T11:29:59,142 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 5c2371c80dbf35b182839e38745bf682 2024-11-21T11:29:59,146 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T11:29:59,146 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 5c2371c80dbf35b182839e38745bf682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=812422, jitterRate=0.03304895758628845}}}, 
FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T11:29:59,147 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5c2371c80dbf35b182839e38745bf682 2024-11-21T11:29:59,148 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 5c2371c80dbf35b182839e38745bf682: Running coprocessor pre-open hook at 1732188599128Writing region info on filesystem at 1732188599128Initializing all the Stores at 1732188599129 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188599129Cleaning up temporary data from old regions at 1732188599137 (+8 ms)Running coprocessor post-open hooks at 1732188599147 (+10 ms)Region opened successfully at 1732188599147 2024-11-21T11:29:59,150 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682., pid=6, masterSystemTime=1732188599121 2024-11-21T11:29:59,153 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682. 2024-11-21T11:29:59,154 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682. 
2024-11-21T11:29:59,155 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5c2371c80dbf35b182839e38745bf682, regionState=OPEN, openSeqNum=2, regionLocation=7b462513bfc2,40563,1732188597662 2024-11-21T11:29:59,158 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5c2371c80dbf35b182839e38745bf682, server=7b462513bfc2,40563,1732188597662 because future has completed 2024-11-21T11:29:59,163 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T11:29:59,164 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 5c2371c80dbf35b182839e38745bf682, server=7b462513bfc2,40563,1732188597662 in 192 msec 2024-11-21T11:29:59,168 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-21T11:29:59,168 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=5c2371c80dbf35b182839e38745bf682, ASSIGN in 354 msec 2024-11-21T11:29:59,169 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T11:29:59,170 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732188599169"}]},"ts":"1732188599169"} 2024-11-21T11:29:59,174 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-21T11:29:59,175 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T11:29:59,179 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 397 msec 2024-11-21T11:30:03,798 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T11:30:03,800 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:03,819 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:03,823 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:03,823 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:03,922 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-21T11:30:08,293 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-21T11:30:08,293 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-21T11:30:08,295 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-21T11:30:08,295 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-21T11:30:08,296 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T11:30:08,296 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-21T11:30:08,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36257 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T11:30:08,889 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-21T11:30:08,889 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-21T11:30:08,894 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-21T11:30:08,894 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682. 2024-11-21T11:30:08,922 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:30:08,927 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T11:30:08,935 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T11:30:08,935 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T11:30:08,936 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T11:30:08,936 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@36d0b5ff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/hadoop.log.dir/,AVAILABLE} 2024-11-21T11:30:08,937 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@51f59516{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T11:30:09,105 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3c5438f9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/java.io.tmpdir/jetty-localhost-45895-hadoop-hdfs-3_4_1-tests_jar-_-any-10809792007400871526/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:30:09,106 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2e7a8425{HTTP/1.1, (http/1.1)}{localhost:45895} 2024-11-21T11:30:09,106 INFO [Time-limited test {}] server.Server(415): Started @116298ms 2024-11-21T11:30:09,108 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T11:30:09,172 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:30:09,178 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T11:30:09,179 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T11:30:09,179 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T11:30:09,179 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T11:30:09,180 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@712f5f14{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/hadoop.log.dir/,AVAILABLE} 2024-11-21T11:30:09,180 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3afd309b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T11:30:09,239 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data5/current/BP-677158924-172.17.0.2-1732188596698/current, will proceed with Du for space computation calculation, 2024-11-21T11:30:09,240 WARN [Thread-830 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data6/current/BP-677158924-172.17.0.2-1732188596698/current, will proceed with Du for space computation calculation, 2024-11-21T11:30:09,291 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-21T11:30:09,295 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x33f1d9805b8f432e with lease ID 0x2edf52267c6427b5: Processing first storage report for DS-3907e662-8bcb-4880-aabc-dfdcc227ede0 from datanode DatanodeRegistration(127.0.0.1:44745, datanodeUuid=1fbd9aed-b95f-4504-9603-ba5b010febf3, infoPort=35017, infoSecurePort=0, ipcPort=33907, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698) 2024-11-21T11:30:09,295 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x33f1d9805b8f432e with lease ID 0x2edf52267c6427b5: from storage DS-3907e662-8bcb-4880-aabc-dfdcc227ede0 node DatanodeRegistration(127.0.0.1:44745, datanodeUuid=1fbd9aed-b95f-4504-9603-ba5b010febf3, infoPort=35017, infoSecurePort=0, ipcPort=33907, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:30:09,296 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x33f1d9805b8f432e with lease ID 0x2edf52267c6427b5: Processing first storage report for DS-ff0d5402-c504-4c3d-b91b-751fb6caf305 from datanode DatanodeRegistration(127.0.0.1:44745, datanodeUuid=1fbd9aed-b95f-4504-9603-ba5b010febf3, infoPort=35017, infoSecurePort=0, ipcPort=33907, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698) 2024-11-21T11:30:09,296 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x33f1d9805b8f432e with lease ID 0x2edf52267c6427b5: from storage DS-ff0d5402-c504-4c3d-b91b-751fb6caf305 node DatanodeRegistration(127.0.0.1:44745, datanodeUuid=1fbd9aed-b95f-4504-9603-ba5b010febf3, infoPort=35017, infoSecurePort=0, ipcPort=33907, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:30:09,345 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@39835cdd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/java.io.tmpdir/jetty-localhost-33885-hadoop-hdfs-3_4_1-tests_jar-_-any-9018288792625002965/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:30:09,346 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@12b19204{HTTP/1.1, (http/1.1)}{localhost:33885} 2024-11-21T11:30:09,346 INFO [Time-limited test {}] server.Server(415): Started @116538ms 2024-11-21T11:30:09,347 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T11:30:09,404 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:30:09,412 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T11:30:09,433 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T11:30:09,433 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T11:30:09,433 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T11:30:09,437 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68004957{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/hadoop.log.dir/,AVAILABLE} 2024-11-21T11:30:09,437 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78e445ac{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T11:30:09,481 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data7/current/BP-677158924-172.17.0.2-1732188596698/current, will proceed with Du for space computation calculation, 2024-11-21T11:30:09,485 WARN [Thread-865 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data8/current/BP-677158924-172.17.0.2-1732188596698/current, will proceed with Du for space computation calculation, 2024-11-21T11:30:09,554 WARN [Thread-844 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-21T11:30:09,557 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd4d59571afa492da with lease ID 0x2edf52267c6427b6: Processing first storage report for DS-83832c2b-b799-4173-8bc4-8704e1ddd612 from datanode DatanodeRegistration(127.0.0.1:38267, datanodeUuid=8d4bcb9e-9acf-4089-9143-4f0ca2634952, infoPort=38447, infoSecurePort=0, ipcPort=34959, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698) 2024-11-21T11:30:09,558 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd4d59571afa492da with lease ID 0x2edf52267c6427b6: from storage DS-83832c2b-b799-4173-8bc4-8704e1ddd612 node DatanodeRegistration(127.0.0.1:38267, datanodeUuid=8d4bcb9e-9acf-4089-9143-4f0ca2634952, infoPort=38447, infoSecurePort=0, ipcPort=34959, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-21T11:30:09,558 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd4d59571afa492da with lease ID 0x2edf52267c6427b6: Processing first storage report for DS-96cba4ee-eb90-4feb-9319-8b42649a4c45 from datanode DatanodeRegistration(127.0.0.1:38267, datanodeUuid=8d4bcb9e-9acf-4089-9143-4f0ca2634952, infoPort=38447, infoSecurePort=0, ipcPort=34959, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698) 2024-11-21T11:30:09,558 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd4d59571afa492da with lease ID 0x2edf52267c6427b6: from storage DS-96cba4ee-eb90-4feb-9319-8b42649a4c45 node DatanodeRegistration(127.0.0.1:38267, datanodeUuid=8d4bcb9e-9acf-4089-9143-4f0ca2634952, infoPort=38447, infoSecurePort=0, ipcPort=34959, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:30:09,583 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5ea37f0d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/java.io.tmpdir/jetty-localhost-35059-hadoop-hdfs-3_4_1-tests_jar-_-any-15058238005560297535/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:30:09,584 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@15010086{HTTP/1.1, (http/1.1)}{localhost:35059} 2024-11-21T11:30:09,584 INFO [Time-limited test {}] server.Server(415): Started @116776ms 2024-11-21T11:30:09,586 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-21T11:30:09,716 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data9/current/BP-677158924-172.17.0.2-1732188596698/current, will proceed with Du for space computation calculation, 2024-11-21T11:30:09,716 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data10/current/BP-677158924-172.17.0.2-1732188596698/current, will proceed with Du for space computation calculation, 2024-11-21T11:30:09,743 WARN [Thread-879 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T11:30:09,746 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9a77e257bc0ff49 with lease ID 0x2edf52267c6427b7: Processing first storage report for DS-a545e067-ef46-40d1-9dc5-40a9a06fc732 from datanode DatanodeRegistration(127.0.0.1:44207, datanodeUuid=dace58ef-a230-4009-9eb1-a0895ea172a9, infoPort=39167, infoSecurePort=0, ipcPort=36891, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698) 2024-11-21T11:30:09,746 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9a77e257bc0ff49 with lease ID 0x2edf52267c6427b7: from storage DS-a545e067-ef46-40d1-9dc5-40a9a06fc732 node DatanodeRegistration(127.0.0.1:44207, datanodeUuid=dace58ef-a230-4009-9eb1-a0895ea172a9, infoPort=39167, infoSecurePort=0, ipcPort=36891, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:30:09,747 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9a77e257bc0ff49 with lease ID 0x2edf52267c6427b7: Processing first storage report for DS-ecf77ba3-678a-40ae-aef3-e2c8cae1d65e from datanode DatanodeRegistration(127.0.0.1:44207, datanodeUuid=dace58ef-a230-4009-9eb1-a0895ea172a9, infoPort=39167, infoSecurePort=0, ipcPort=36891, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698) 2024-11-21T11:30:09,747 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9a77e257bc0ff49 with lease ID 0x2edf52267c6427b7: from storage DS-ecf77ba3-678a-40ae-aef3-e2c8cae1d65e node DatanodeRegistration(127.0.0.1:44207, datanodeUuid=dace58ef-a230-4009-9eb1-a0895ea172a9, infoPort=39167, infoSecurePort=0, ipcPort=36891, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:30:09,824 WARN [ResponseProcessor for block BP-677158924-172.17.0.2-1732188596698:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-677158924-172.17.0.2-1732188596698:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:09,824 WARN [ResponseProcessor for block BP-677158924-172.17.0.2-1732188596698:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-677158924-172.17.0.2-1732188596698:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-677158924-172.17.0.2-1732188596698:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:09,824 WARN [ResponseProcessor for block BP-677158924-172.17.0.2-1732188596698:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-677158924-172.17.0.2-1732188596698:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-677158924-172.17.0.2-1732188596698:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:09,824 WARN [ResponseProcessor for block BP-677158924-172.17.0.2-1732188596698:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-677158924-172.17.0.2-1732188596698:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:09,825 WARN [PacketResponder: BP-677158924-172.17.0.2-1732188596698:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39055] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:09,826 WARN [DataStreamer for file /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/WALs/7b462513bfc2,36257,1732188597601/7b462513bfc2%2C36257%2C1732188597601.1732188597780 block BP-677158924-172.17.0.2-1732188596698:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK], DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK]) is bad. 2024-11-21T11:30:09,827 WARN [DataStreamer for file /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188598068 block BP-677158924-172.17.0.2-1732188596698:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK], DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK]) is bad. 2024-11-21T11:30:09,827 WARN [DataStreamer for file /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta block BP-677158924-172.17.0.2-1732188596698:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK], DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK]) is bad. 2024-11-21T11:30:09,827 WARN [DataStreamer for file /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 block BP-677158924-172.17.0.2-1732188596698:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK], DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK]) is bad. 2024-11-21T11:30:09,827 WARN [PacketResponder: BP-677158924-172.17.0.2-1732188596698:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39055] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] 
at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:09,827 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_247602001_22 at /127.0.0.1:33440 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44593:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33440 dst: /127.0.0.1:44593 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:09,827 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:46992 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39055:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46992 dst: /127.0.0.1:39055 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:09,828 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-655749805_22 at /127.0.0.1:33492 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:44593:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33492 dst: /127.0.0.1:44593 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T11:30:09,828 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_247602001_22 at /127.0.0.1:46948 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39055:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46948 dst: /127.0.0.1:39055 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:09,828 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-655749805_22 at /127.0.0.1:47026 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:39055:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47026 dst: /127.0.0.1:39055 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:09,829 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:33476 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44593:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33476 dst: /127.0.0.1:44593 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T11:30:09,829 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:33472 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44593:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33472 dst: /127.0.0.1:44593 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:09,830 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@21d5e4af{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:30:09,829 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:46984 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39055:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46984 dst: /127.0.0.1:39055 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:09,832 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@145c0180{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:30:09,833 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:30:09,833 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7720beab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:30:09,833 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69bbaec1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/hadoop.log.dir/,STOPPED} 2024-11-21T11:30:09,838 WARN [BP-677158924-172.17.0.2-1732188596698 heartbeating to localhost/127.0.0.1:37309 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T11:30:09,838 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T11:30:09,838 WARN [BP-677158924-172.17.0.2-1732188596698 heartbeating to localhost/127.0.0.1:37309 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-677158924-172.17.0.2-1732188596698 (Datanode Uuid ae719419-eba9-47fe-a9d9-6df1c759590a) service to localhost/127.0.0.1:37309 2024-11-21T11:30:09,838 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T11:30:09,838 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data3/current/BP-677158924-172.17.0.2-1732188596698 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:30:09,839 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data4/current/BP-677158924-172.17.0.2-1732188596698 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:30:09,839 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T11:30:09,841 WARN [DataStreamer for file /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 block BP-677158924-172.17.0.2-1732188596698:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:09,844 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@1cd086eb {}] datanode.DataXceiver(331): 127.0.0.1:44593:DataXceiver error processing unknown operation src: /127.0.0.1:47068 dst: /127.0.0.1:44593 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T11:30:09,845 WARN [DataStreamer for file /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/WALs/7b462513bfc2,36257,1732188597601/7b462513bfc2%2C36257%2C1732188597601.1732188597780 block BP-677158924-172.17.0.2-1732188596698:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:09,846 WARN [DataStreamer for file /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta block BP-677158924-172.17.0.2-1732188596698:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-21T11:30:09,847 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@ab5393f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:30:09,847 WARN [DataStreamer for file /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188598068 block BP-677158924-172.17.0.2-1732188596698:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:09,847 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@47f82e76{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:30:09,847 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:30:09,847 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30e7c448{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:30:09,847 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@257cf4bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/hadoop.log.dir/,STOPPED} 2024-11-21T11:30:09,849 WARN [BP-677158924-172.17.0.2-1732188596698 heartbeating to localhost/127.0.0.1:37309 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T11:30:09,849 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T11:30:09,849 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T11:30:09,849 WARN [BP-677158924-172.17.0.2-1732188596698 heartbeating to localhost/127.0.0.1:37309 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-677158924-172.17.0.2-1732188596698 (Datanode Uuid 41546d6a-8a61-4f57-a920-c2cab26d2b43) service to localhost/127.0.0.1:37309 2024-11-21T11:30:09,850 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data1/current/BP-677158924-172.17.0.2-1732188596698 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:30:09,850 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data2/current/BP-677158924-172.17.0.2-1732188596698 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:30:09,850 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T11:30:09,854 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682., hostname=7b462513bfc2,40563,1732188597662, seqNum=2] 2024-11-21T11:30:09,856 ERROR [FSHLog-0-hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647-prefix:7b462513bfc2,40563,1732188597662 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:09,856 WARN [FSHLog-0-hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647-prefix:7b462513bfc2,40563,1732188597662 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:09,856 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:09,857 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7b462513bfc2%2C40563%2C1732188597662:(num 1732188598068) roll requested 2024-11-21T11:30:09,857 INFO [regionserver/7b462513bfc2:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C40563%2C1732188597662.1732188609857 2024-11-21T11:30:09,870 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:09,870 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:09,870 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:09,871 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:09,871 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:09,871 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188598068 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188609857 2024-11-21T11:30:09,872 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:09,873 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:09,874 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-21T11:30:09,874 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-21T11:30:09,874 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188598068 2024-11-21T11:30:09,876 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35017:35017),(127.0.0.1/127.0.0.1:38447:38447)] 2024-11-21T11:30:09,876 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188598068 is not closed yet, will try archiving it next time 2024-11-21T11:30:09,877 WARN [IPC Server handler 1 on default port 37309 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188598068 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-21T11:30:09,882 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188598068 after 6ms 2024-11-21T11:30:10,726 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:11,760 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:11,877 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:11,880 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188609857 2024-11-21T11:30:11,880 WARN [ResponseProcessor for block BP-677158924-172.17.0.2-1732188596698:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-677158924-172.17.0.2-1732188596698:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:11,881 WARN [DataStreamer for file /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188609857 block BP-677158924-172.17.0.2-1732188596698:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK], DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK]) is bad. 2024-11-21T11:30:11,881 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:51360 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:44745:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51360 dst: /127.0.0.1:44745 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:11,881 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:48136 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:38267:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48136 dst: /127.0.0.1:38267 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T11:30:11,883 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3c5438f9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:30:11,883 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2e7a8425{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:30:11,883 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:30:11,883 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@51f59516{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:30:11,884 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@36d0b5ff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/hadoop.log.dir/,STOPPED} 2024-11-21T11:30:11,884 WARN [BP-677158924-172.17.0.2-1732188596698 heartbeating to localhost/127.0.0.1:37309 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T11:30:11,885 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-21T11:30:11,885 WARN [BP-677158924-172.17.0.2-1732188596698 heartbeating to localhost/127.0.0.1:37309 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-677158924-172.17.0.2-1732188596698 (Datanode Uuid 1fbd9aed-b95f-4504-9603-ba5b010febf3) service to localhost/127.0.0.1:37309 2024-11-21T11:30:11,885 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T11:30:11,885 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data5/current/BP-677158924-172.17.0.2-1732188596698 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:30:11,885 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T11:30:11,886 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data6/current/BP-677158924-172.17.0.2-1732188596698 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:30:12,726 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:13,761 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:13,878 WARN [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]] 2024-11-21T11:30:13,878 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:13,878 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7b462513bfc2%2C40563%2C1732188597662:(num 1732188609857) roll requested 2024-11-21T11:30:13,878 INFO [regionserver/7b462513bfc2:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C40563%2C1732188597662.1732188613878 2024-11-21T11:30:13,882 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:13,882 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK], DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK]) is bad. 2024-11-21T11:30:13,882 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741839_1021 2024-11-21T11:30:13,882 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188598068 after 4008ms 2024-11-21T11:30:13,884 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK] 2024-11-21T11:30:13,887 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:13,888 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK], DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK]) is bad. 2024-11-21T11:30:13,888 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741840_1022 2024-11-21T11:30:13,888 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK] 2024-11-21T11:30:13,889 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:13,889 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK], DatanodeInfoWithStorage[127.0.0.1:44207,DS-a545e067-ef46-40d1-9dc5-40a9a06fc732,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]) is bad. 2024-11-21T11:30:13,889 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741841_1023 2024-11-21T11:30:13,890 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK] 2024-11-21T11:30:13,893 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T11:30:13,897 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:13,897 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:13,898 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:13,898 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:13,898 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:13,898 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188609857 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188613878 2024-11-21T11:30:13,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38267 is added to blk_1073741838_1020 (size=3600) 2024-11-21T11:30:13,907 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39167:39167),(127.0.0.1/127.0.0.1:38447:38447)] 2024-11-21T11:30:13,907 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188598068 is not closed yet, will try archiving it next time 2024-11-21T11:30:13,907 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188609857 is not closed yet, will try archiving it next time 2024-11-21T11:30:14,305 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): 
hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188598068 is not closed yet, will try archiving it next time 2024-11-21T11:30:14,727 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:15,569 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6c901f9[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38267, datanodeUuid=8d4bcb9e-9acf-4089-9143-4f0ca2634952, infoPort=38447, infoSecurePort=0, ipcPort=34959, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698):Failed to transfer BP-677158924-172.17.0.2-1732188596698:blk_1073741838_1020 to 127.0.0.1:44593 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:15,761 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-21T11:30:15,897 WARN [ResponseProcessor for block BP-677158924-172.17.0.2-1732188596698:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-677158924-172.17.0.2-1732188596698:blk_1073741842_1024 java.io.IOException: Bad response ERROR for BP-677158924-172.17.0.2-1732188596698:blk_1073741842_1024 from datanode DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:15,897 WARN [DataStreamer for file /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188613878 block BP-677158924-172.17.0.2-1732188596698:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44207,DS-a545e067-ef46-40d1-9dc5-40a9a06fc732,DISK], DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]) is bad. 2024-11-21T11:30:15,897 WARN [PacketResponder: BP-677158924-172.17.0.2-1732188596698:blk_1073741842_1024, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:38267] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:15,898 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59636 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:44207:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59636 dst: /127.0.0.1:44207 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:15,898 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:52834 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:38267:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52834 dst: /127.0.0.1:38267 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:15,899 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@39835cdd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:30:15,899 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@12b19204{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:30:15,899 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:30:15,900 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3afd309b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:30:15,900 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@712f5f14{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/hadoop.log.dir/,STOPPED} 2024-11-21T11:30:15,901 WARN [BP-677158924-172.17.0.2-1732188596698 heartbeating to localhost/127.0.0.1:37309 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T11:30:15,901 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-21T11:30:15,901 WARN [BP-677158924-172.17.0.2-1732188596698 heartbeating to localhost/127.0.0.1:37309 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-677158924-172.17.0.2-1732188596698 (Datanode Uuid 8d4bcb9e-9acf-4089-9143-4f0ca2634952) service to localhost/127.0.0.1:37309 2024-11-21T11:30:15,901 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T11:30:15,902 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data7/current/BP-677158924-172.17.0.2-1732188596698 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:30:15,902 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data8/current/BP-677158924-172.17.0.2-1732188596698 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:30:15,902 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T11:30:15,908 WARN [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44207,DS-a545e067-ef46-40d1-9dc5-40a9a06fc732,DISK]] 2024-11-21T11:30:15,908 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:15,908 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7b462513bfc2%2C40563%2C1732188597662:(num 1732188613878) roll requested 2024-11-21T11:30:15,908 INFO [regionserver/7b462513bfc2:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C40563%2C1732188597662.1732188615908 2024-11-21T11:30:15,911 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:15,912 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK], DatanodeInfoWithStorage[127.0.0.1:44207,DS-a545e067-ef46-40d1-9dc5-40a9a06fc732,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]) is bad. 
2024-11-21T11:30:15,912 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741843_1026 2024-11-21T11:30:15,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] regionserver.HRegion(8855): Flush requested on 5c2371c80dbf35b182839e38745bf682 2024-11-21T11:30:15,912 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5c2371c80dbf35b182839e38745bf682 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-21T11:30:15,912 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK] 2024-11-21T11:30:15,914 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:15,914 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK], DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK]) is bad. 2024-11-21T11:30:15,914 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741844_1027 2024-11-21T11:30:15,915 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK] 2024-11-21T11:30:15,916 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-21T11:30:15,917 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK], DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]) is bad. 2024-11-21T11:30:15,917 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741845_1028 2024-11-21T11:30:15,918 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK] 2024-11-21T11:30:15,921 WARN [Thread-921 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44745 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:15,921 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59654 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data10]'}, localName='127.0.0.1:44207', datanodeUuid='dace58ef-a230-4009-9eb1-a0895ea172a9', xmitsInProgress=0}:Exception transferring block BP-677158924-172.17.0.2-1732188596698:blk_1073741846_1029 to mirror 127.0.0.1:44745 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T11:30:15,921 WARN [Thread-921 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44207,DS-a545e067-ef46-40d1-9dc5-40a9a06fc732,DISK], DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK]) is bad. 2024-11-21T11:30:15,921 WARN [Thread-921 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741846_1029 2024-11-21T11:30:15,921 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59654 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-21T11:30:15,921 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59654 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:44207:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59654 dst: /127.0.0.1:44207 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T11:30:15,922 WARN [Thread-921 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK] 2024-11-21T11:30:15,923 WARN [IPC Server handler 1 on default port 37309 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-21T11:30:15,923 WARN [IPC Server handler 1 on default port 37309 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-21T11:30:15,923 WARN [IPC Server handler 1 on default port 37309 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-21T11:30:15,929 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:15,929 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:15,930 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:15,930 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:15,930 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:15,930 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188613878 with entries=11, filesize=11.81 KB; new WAL /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188615908 2024-11-21T11:30:15,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741842_1025 (size=12106) 2024-11-21T11:30:15,933 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/.tmp/info/7cfbc91bbd3f45ba8005162e66c70bf1 is 1080, key is row0002/info:/1732188611887/Put/seqid=0 2024-11-21T11:30:15,938 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39055 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:15,938 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59678 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741848_1031] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data10]'}, localName='127.0.0.1:44207', datanodeUuid='dace58ef-a230-4009-9eb1-a0895ea172a9', xmitsInProgress=0}:Exception transferring block BP-677158924-172.17.0.2-1732188596698:blk_1073741848_1031 to mirror 127.0.0.1:39055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:15,938 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44207,DS-a545e067-ef46-40d1-9dc5-40a9a06fc732,DISK], DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK]) is bad. 2024-11-21T11:30:15,938 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741848_1031 2024-11-21T11:30:15,938 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59678 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741848_1031] {}] datanode.BlockReceiver(316): Block 1073741848 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-21T11:30:15,938 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59678 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741848_1031] {}] datanode.DataXceiver(331): 127.0.0.1:44207:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59678 dst: /127.0.0.1:44207 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:15,938 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK] 2024-11-21T11:30:15,942 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39167:39167)] 2024-11-21T11:30:15,942 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188598068 is not closed yet, will try archiving it next time 2024-11-21T11:30:15,942 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188613878 is not closed yet, will try archiving it next time 2024-11-21T11:30:15,942 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44745 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-21T11:30:15,942 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59684 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data10]'}, localName='127.0.0.1:44207', datanodeUuid='dace58ef-a230-4009-9eb1-a0895ea172a9', xmitsInProgress=0}:Exception transferring block BP-677158924-172.17.0.2-1732188596698:blk_1073741849_1032 to mirror 127.0.0.1:44745 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:15,943 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44207,DS-a545e067-ef46-40d1-9dc5-40a9a06fc732,DISK], DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK]) is bad. 2024-11-21T11:30:15,943 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741849_1032 2024-11-21T11:30:15,943 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59684 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-21T11:30:15,943 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59684 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:44207:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59684 dst: /127.0.0.1:44207 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:15,943 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK] 2024-11-21T11:30:15,946 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38267 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:15,946 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59694 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data10]'}, localName='127.0.0.1:44207', datanodeUuid='dace58ef-a230-4009-9eb1-a0895ea172a9', xmitsInProgress=0}:Exception transferring block BP-677158924-172.17.0.2-1732188596698:blk_1073741850_1033 to mirror 127.0.0.1:38267 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T11:30:15,946 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44207,DS-a545e067-ef46-40d1-9dc5-40a9a06fc732,DISK], DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]) is bad. 2024-11-21T11:30:15,946 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741850_1033 2024-11-21T11:30:15,946 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59694 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-21T11:30:15,946 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59694 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:44207:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59694 dst: /127.0.0.1:44207 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:15,946 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK] 2024-11-21T11:30:15,948 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44593 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-21T11:30:15,948 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59706 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741851_1034] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data10]'}, localName='127.0.0.1:44207', datanodeUuid='dace58ef-a230-4009-9eb1-a0895ea172a9', xmitsInProgress=0}:Exception transferring block BP-677158924-172.17.0.2-1732188596698:blk_1073741851_1034 to mirror 127.0.0.1:44593 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:15,948 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44207,DS-a545e067-ef46-40d1-9dc5-40a9a06fc732,DISK], DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]) is bad. 2024-11-21T11:30:15,949 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741851_1034 2024-11-21T11:30:15,949 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59706 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741851_1034] {}] datanode.BlockReceiver(316): Block 1073741851 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-21T11:30:15,949 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59706 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741851_1034] {}] datanode.DataXceiver(331): 127.0.0.1:44207:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59706 dst: /127.0.0.1:44207 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:15,949 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK] 2024-11-21T11:30:15,950 WARN [IPC Server handler 3 on default port 37309 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-21T11:30:15,950 WARN [IPC Server handler 3 on default port 37309 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-21T11:30:15,950 WARN [IPC Server handler 3 on default port 37309 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-21T11:30:15,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741852_1035 (size=10347) 2024-11-21T11:30:16,333 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188598068 is not closed yet, will try archiving it next time 2024-11-21T11:30:16,354 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/.tmp/info/7cfbc91bbd3f45ba8005162e66c70bf1 2024-11-21T11:30:16,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/.tmp/info/7cfbc91bbd3f45ba8005162e66c70bf1 as 
hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/7cfbc91bbd3f45ba8005162e66c70bf1 2024-11-21T11:30:16,368 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/7cfbc91bbd3f45ba8005162e66c70bf1, entries=5, sequenceid=11, filesize=10.1 K 2024-11-21T11:30:16,369 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 5c2371c80dbf35b182839e38745bf682 in 457ms, sequenceid=11, compaction requested=false 2024-11-21T11:30:16,369 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5c2371c80dbf35b182839e38745bf682: 2024-11-21T11:30:16,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] regionserver.HRegion(8855): Flush requested on 5c2371c80dbf35b182839e38745bf682 2024-11-21T11:30:16,551 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5c2371c80dbf35b182839e38745bf682 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-21T11:30:16,556 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/.tmp/info/379d8d4d309e4645b4305f3d55bcf28e is 1080, key is row0007/info:/1732188615913/Put/seqid=0 2024-11-21T11:30:16,558 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:16,559 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK], DatanodeInfoWithStorage[127.0.0.1:44207,DS-a545e067-ef46-40d1-9dc5-40a9a06fc732,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]) is bad. 
2024-11-21T11:30:16,559 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741853_1036 2024-11-21T11:30:16,560 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK] 2024-11-21T11:30:16,561 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:16,561 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK], DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK]) is bad. 2024-11-21T11:30:16,561 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741854_1037 2024-11-21T11:30:16,561 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK] 2024-11-21T11:30:16,562 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-21T11:30:16,563 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK], DatanodeInfoWithStorage[127.0.0.1:44207,DS-a545e067-ef46-40d1-9dc5-40a9a06fc732,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK]) is bad. 2024-11-21T11:30:16,563 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741855_1038 2024-11-21T11:30:16,563 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK] 2024-11-21T11:30:16,565 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38267 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:16,565 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59728 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741856_1039] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data10]'}, localName='127.0.0.1:44207', datanodeUuid='dace58ef-a230-4009-9eb1-a0895ea172a9', xmitsInProgress=0}:Exception transferring block BP-677158924-172.17.0.2-1732188596698:blk_1073741856_1039 to mirror 127.0.0.1:38267 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T11:30:16,565 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44207,DS-a545e067-ef46-40d1-9dc5-40a9a06fc732,DISK], DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]) is bad. 2024-11-21T11:30:16,565 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741856_1039 2024-11-21T11:30:16,565 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59728 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741856_1039] {}] datanode.BlockReceiver(316): Block 1073741856 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-21T11:30:16,565 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59728 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741856_1039] {}] datanode.DataXceiver(331): 127.0.0.1:44207:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59728 dst: /127.0.0.1:44207 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T11:30:16,566 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK] 2024-11-21T11:30:16,566 WARN [IPC Server handler 0 on default port 37309 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-21T11:30:16,566 WARN [IPC Server handler 0 on default port 37309 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-21T11:30:16,567 WARN [IPC Server handler 0 on default port 37309 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-21T11:30:16,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741857_1040 (size=12506) 2024-11-21T11:30:16,727 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-21T11:30:16,970 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/.tmp/info/379d8d4d309e4645b4305f3d55bcf28e 2024-11-21T11:30:16,977 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/.tmp/info/379d8d4d309e4645b4305f3d55bcf28e as hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/379d8d4d309e4645b4305f3d55bcf28e 2024-11-21T11:30:16,982 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/379d8d4d309e4645b4305f3d55bcf28e, entries=7, sequenceid=24, filesize=12.2 K 2024-11-21T11:30:16,983 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 5c2371c80dbf35b182839e38745bf682 in 432ms, sequenceid=24, compaction requested=false 2024-11-21T11:30:16,983 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5c2371c80dbf35b182839e38745bf682: 2024-11-21T11:30:16,984 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-21T11:30:16,984 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:30:16,984 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/379d8d4d309e4645b4305f3d55bcf28e because midkey is the same as first or last row 2024-11-21T11:30:17,762 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:17,942 WARN [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44207,DS-a545e067-ef46-40d1-9dc5-40a9a06fc732,DISK]] 2024-11-21T11:30:17,942 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:17,942 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7b462513bfc2%2C40563%2C1732188597662:(num 1732188615908) roll requested 2024-11-21T11:30:17,943 INFO [regionserver/7b462513bfc2:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C40563%2C1732188597662.1732188617943 2024-11-21T11:30:17,946 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:17,946 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK], DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]) is bad. 2024-11-21T11:30:17,946 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741858_1041 2024-11-21T11:30:17,947 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK] 2024-11-21T11:30:17,948 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:17,948 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK], DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK]) is bad. 2024-11-21T11:30:17,948 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741859_1042 2024-11-21T11:30:17,949 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK] 2024-11-21T11:30:17,950 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:17,950 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK], DatanodeInfoWithStorage[127.0.0.1:44207,DS-a545e067-ef46-40d1-9dc5-40a9a06fc732,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK]) is bad. 2024-11-21T11:30:17,950 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741860_1043 2024-11-21T11:30:17,951 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK] 2024-11-21T11:30:17,953 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44593 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:17,953 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59748 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data10]'}, localName='127.0.0.1:44207', datanodeUuid='dace58ef-a230-4009-9eb1-a0895ea172a9', xmitsInProgress=0}:Exception transferring block BP-677158924-172.17.0.2-1732188596698:blk_1073741861_1044 to mirror 127.0.0.1:44593 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:17,953 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44207,DS-a545e067-ef46-40d1-9dc5-40a9a06fc732,DISK], DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]) is bad. 2024-11-21T11:30:17,953 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741861_1044 2024-11-21T11:30:17,953 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59748 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-21T11:30:17,953 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59748 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:44207:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59748 dst: /127.0.0.1:44207 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:17,954 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK] 2024-11-21T11:30:17,954 WARN [IPC Server handler 1 on default port 37309 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-21T11:30:17,954 WARN [IPC Server handler 1 on default port 37309 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-21T11:30:17,954 WARN [IPC Server handler 1 on default port 37309 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-21T11:30:17,957 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:17,957 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:17,957 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:17,957 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:17,957 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:17,957 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL 
/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188615908 with entries=13, filesize=12.50 KB; new WAL /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188617943 2024-11-21T11:30:17,958 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39167:39167)] 2024-11-21T11:30:17,958 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188598068 is not closed yet, will try archiving it next time 2024-11-21T11:30:17,958 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188615908 is not closed yet, will try archiving it next time 2024-11-21T11:30:17,959 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188609857 to hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/oldWALs/7b462513bfc2%2C40563%2C1732188597662.1732188609857 2024-11-21T11:30:17,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741847_1030 (size=12810) 2024-11-21T11:30:17,960 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188613878 to hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/oldWALs/7b462513bfc2%2C40563%2C1732188597662.1732188613878 2024-11-21T11:30:17,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] regionserver.HRegion(8855): Flush requested on 5c2371c80dbf35b182839e38745bf682 2024-11-21T11:30:17,971 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5c2371c80dbf35b182839e38745bf682 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-21T11:30:17,976 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/.tmp/info/f457d234a5fd420b8630039ccbf88cc1 is 1079, key is tmprow/info:/1732188617970/Put/seqid=0 2024-11-21T11:30:17,977 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:17,977 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK], DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK]) is bad. 2024-11-21T11:30:17,977 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741863_1046 2024-11-21T11:30:17,978 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK] 2024-11-21T11:30:17,979 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:17,979 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK], DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]) is bad. 2024-11-21T11:30:17,979 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741864_1047 2024-11-21T11:30:17,980 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK] 2024-11-21T11:30:17,985 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44745 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:17,985 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59766 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741865_1048] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data10]'}, localName='127.0.0.1:44207', datanodeUuid='dace58ef-a230-4009-9eb1-a0895ea172a9', xmitsInProgress=0}:Exception transferring block BP-677158924-172.17.0.2-1732188596698:blk_1073741865_1048 to mirror 127.0.0.1:44745 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:17,985 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44207,DS-a545e067-ef46-40d1-9dc5-40a9a06fc732,DISK], DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK]) is bad. 2024-11-21T11:30:17,985 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741865_1048 2024-11-21T11:30:17,985 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59766 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741865_1048] {}] datanode.BlockReceiver(316): Block 1073741865 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-21T11:30:17,985 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59766 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741865_1048] {}] datanode.DataXceiver(331): 127.0.0.1:44207:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59766 dst: /127.0.0.1:44207 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:17,985 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK] 2024-11-21T11:30:17,988 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44593 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:17,988 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59780 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data10]'}, localName='127.0.0.1:44207', datanodeUuid='dace58ef-a230-4009-9eb1-a0895ea172a9', xmitsInProgress=0}:Exception transferring block BP-677158924-172.17.0.2-1732188596698:blk_1073741866_1049 to mirror 127.0.0.1:44593 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:17,988 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44207,DS-a545e067-ef46-40d1-9dc5-40a9a06fc732,DISK], DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]) is bad. 2024-11-21T11:30:17,988 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741866_1049 2024-11-21T11:30:17,988 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59780 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-21T11:30:17,988 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59780 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:44207:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59780 dst: /127.0.0.1:44207 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T11:30:17,988 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK] 2024-11-21T11:30:17,989 WARN [IPC Server handler 4 on default port 37309 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-21T11:30:17,989 WARN [IPC Server handler 4 on default port 37309 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-21T11:30:17,989 WARN [IPC Server handler 4 on default port 37309 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-21T11:30:17,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741867_1050 (size=6027) 2024-11-21T11:30:18,360 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188598068 is not closed yet, will try archiving it next time 2024-11-21T11:30:18,393 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/.tmp/info/f457d234a5fd420b8630039ccbf88cc1 2024-11-21T11:30:18,400 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/.tmp/info/f457d234a5fd420b8630039ccbf88cc1 as hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/f457d234a5fd420b8630039ccbf88cc1 2024-11-21T11:30:18,406 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/f457d234a5fd420b8630039ccbf88cc1, entries=1, sequenceid=34, filesize=5.9 K 2024-11-21T11:30:18,408 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 5c2371c80dbf35b182839e38745bf682 in 437ms, 
sequenceid=34, compaction requested=true 2024-11-21T11:30:18,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5c2371c80dbf35b182839e38745bf682: 2024-11-21T11:30:18,408 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-21T11:30:18,408 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:30:18,408 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/379d8d4d309e4645b4305f3d55bcf28e because midkey is the same as first or last row 2024-11-21T11:30:18,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c2371c80dbf35b182839e38745bf682:info, priority=-2147483648, current under compaction store size is 1 2024-11-21T11:30:18,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T11:30:18,409 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T11:30:18,410 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T11:30:18,410 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.HStore(1541): 5c2371c80dbf35b182839e38745bf682/info is initiating minor compaction (all files) 2024-11-21T11:30:18,410 INFO [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 5c2371c80dbf35b182839e38745bf682/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682. 
2024-11-21T11:30:18,410 INFO [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/7cfbc91bbd3f45ba8005162e66c70bf1, hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/379d8d4d309e4645b4305f3d55bcf28e, hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/f457d234a5fd420b8630039ccbf88cc1] into tmpdir=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/.tmp, totalSize=28.2 K 2024-11-21T11:30:18,411 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7cfbc91bbd3f45ba8005162e66c70bf1, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732188611887 2024-11-21T11:30:18,411 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] compactions.Compactor(225): Compacting 379d8d4d309e4645b4305f3d55bcf28e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1732188615913 2024-11-21T11:30:18,412 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] compactions.Compactor(225): Compacting f457d234a5fd420b8630039ccbf88cc1, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732188617970 2024-11-21T11:30:18,425 INFO [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5c2371c80dbf35b182839e38745bf682#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T11:30:18,426 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/.tmp/info/a9061a66b2494dc28215e6eab6b9b9bd is 1080, key is row0002/info:/1732188611887/Put/seqid=0 2024-11-21T11:30:18,428 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-21T11:30:18,428 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK], DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]) is bad. 2024-11-21T11:30:18,428 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741868_1051 2024-11-21T11:30:18,429 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK] 2024-11-21T11:30:18,432 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38267 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:18,431 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59804 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741869_1052] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data10]'}, localName='127.0.0.1:44207', datanodeUuid='dace58ef-a230-4009-9eb1-a0895ea172a9', xmitsInProgress=0}:Exception transferring block BP-677158924-172.17.0.2-1732188596698:blk_1073741869_1052 to mirror 127.0.0.1:38267 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T11:30:18,432 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44207,DS-a545e067-ef46-40d1-9dc5-40a9a06fc732,DISK], DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]) is bad. 2024-11-21T11:30:18,432 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741869_1052 2024-11-21T11:30:18,432 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59804 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741869_1052] {}] datanode.BlockReceiver(316): Block 1073741869 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-21T11:30:18,432 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59804 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741869_1052] {}] datanode.DataXceiver(331): 127.0.0.1:44207:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59804 dst: /127.0.0.1:44207 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:18,432 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK] 2024-11-21T11:30:18,435 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44745 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-21T11:30:18,435 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44207,DS-a545e067-ef46-40d1-9dc5-40a9a06fc732,DISK], DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK]) is bad. 2024-11-21T11:30:18,435 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59810 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741870_1053] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data10]'}, localName='127.0.0.1:44207', datanodeUuid='dace58ef-a230-4009-9eb1-a0895ea172a9', xmitsInProgress=0}:Exception transferring block BP-677158924-172.17.0.2-1732188596698:blk_1073741870_1053 to mirror 127.0.0.1:44745 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:18,435 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741870_1053 2024-11-21T11:30:18,435 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59810 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741870_1053] {}] datanode.BlockReceiver(316): Block 1073741870 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-21T11:30:18,435 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59810 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741870_1053] {}] datanode.DataXceiver(331): 127.0.0.1:44207:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59810 dst: /127.0.0.1:44207 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:18,435 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK] 2024-11-21T11:30:18,437 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:18,437 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK], DatanodeInfoWithStorage[127.0.0.1:44207,DS-a545e067-ef46-40d1-9dc5-40a9a06fc732,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK]) is bad. 
2024-11-21T11:30:18,437 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741871_1054 2024-11-21T11:30:18,437 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK] 2024-11-21T11:30:18,438 WARN [IPC Server handler 2 on default port 37309 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-21T11:30:18,438 WARN [IPC Server handler 2 on default port 37309 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-21T11:30:18,438 WARN [IPC Server handler 2 on default port 37309 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-21T11:30:18,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741872_1055 (size=17994) 2024-11-21T11:30:18,728 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:18,748 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4e4c6ecf[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44207, datanodeUuid=dace58ef-a230-4009-9eb1-a0895ea172a9, infoPort=39167, infoSecurePort=0, ipcPort=36891, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698):Failed to transfer BP-677158924-172.17.0.2-1732188596698:blk_1073741842_1025 to 127.0.0.1:39055 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:18,748 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@408b8976[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44207, datanodeUuid=dace58ef-a230-4009-9eb1-a0895ea172a9, infoPort=39167, infoSecurePort=0, ipcPort=36891, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698):Failed to transfer BP-677158924-172.17.0.2-1732188596698:blk_1073741852_1035 to 127.0.0.1:44745 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:18,849 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/.tmp/info/a9061a66b2494dc28215e6eab6b9b9bd as hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/a9061a66b2494dc28215e6eab6b9b9bd 2024-11-21T11:30:18,856 INFO [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 5c2371c80dbf35b182839e38745bf682/info of 5c2371c80dbf35b182839e38745bf682 into a9061a66b2494dc28215e6eab6b9b9bd(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T11:30:18,857 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 5c2371c80dbf35b182839e38745bf682: 2024-11-21T11:30:18,857 INFO [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682., storeName=5c2371c80dbf35b182839e38745bf682/info, priority=13, startTime=1732188618408; duration=0sec 2024-11-21T11:30:18,857 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-21T11:30:18,857 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:30:18,857 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/a9061a66b2494dc28215e6eab6b9b9bd because midkey is the same as first or last row 2024-11-21T11:30:18,857 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-21T11:30:18,857 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:30:18,857 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/a9061a66b2494dc28215e6eab6b9b9bd because midkey is the same as first or last row 2024-11-21T11:30:18,857 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-21T11:30:18,857 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:30:18,857 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/a9061a66b2494dc28215e6eab6b9b9bd because midkey is the same as first or last row 2024-11-21T11:30:18,857 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T11:30:18,857 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c2371c80dbf35b182839e38745bf682:info 2024-11-21T11:30:19,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] regionserver.HRegion(8855): Flush requested on 5c2371c80dbf35b182839e38745bf682 2024-11-21T11:30:19,392 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5c2371c80dbf35b182839e38745bf682 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-21T11:30:19,397 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/.tmp/info/dbef7f3fed844c7c9b78f8f7b74b4f57 is 1079, key is tmprow/info:/1732188619390/Put/seqid=0 2024-11-21T11:30:19,398 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:19,398 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK], DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK]) is bad. 2024-11-21T11:30:19,398 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741873_1056 2024-11-21T11:30:19,399 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK] 2024-11-21T11:30:19,401 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39055 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-21T11:30:19,401 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59816 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741874_1057] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data10]'}, localName='127.0.0.1:44207', datanodeUuid='dace58ef-a230-4009-9eb1-a0895ea172a9', xmitsInProgress=0}:Exception transferring block BP-677158924-172.17.0.2-1732188596698:blk_1073741874_1057 to mirror 127.0.0.1:39055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:19,401 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44207,DS-a545e067-ef46-40d1-9dc5-40a9a06fc732,DISK], DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK]) is bad. 2024-11-21T11:30:19,401 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741874_1057 2024-11-21T11:30:19,401 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59816 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741874_1057] {}] datanode.BlockReceiver(316): Block 1073741874 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-21T11:30:19,402 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:59816 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741874_1057] {}] datanode.DataXceiver(331): 127.0.0.1:44207:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59816 dst: /127.0.0.1:44207 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:19,402 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39055,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK] 2024-11-21T11:30:19,403 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:19,403 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK], DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]) is bad. 2024-11-21T11:30:19,403 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741875_1058 2024-11-21T11:30:19,404 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK] 2024-11-21T11:30:19,405 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:19,405 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK], DatanodeInfoWithStorage[127.0.0.1:44207,DS-a545e067-ef46-40d1-9dc5-40a9a06fc732,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]) is bad. 2024-11-21T11:30:19,405 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741876_1059 2024-11-21T11:30:19,405 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK] 2024-11-21T11:30:19,406 WARN [IPC Server handler 3 on default port 37309 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-21T11:30:19,406 WARN [IPC Server handler 3 on default port 37309 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-21T11:30:19,406 WARN [IPC Server handler 3 on default port 37309 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-21T11:30:19,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741877_1060 (size=6027) 2024-11-21T11:30:19,748 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@408b8976[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44207, datanodeUuid=dace58ef-a230-4009-9eb1-a0895ea172a9, infoPort=39167, infoSecurePort=0, ipcPort=36891, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698):Failed to transfer BP-677158924-172.17.0.2-1732188596698:blk_1073741857_1040 to 127.0.0.1:38267 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:19,748 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4e4c6ecf[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44207, datanodeUuid=dace58ef-a230-4009-9eb1-a0895ea172a9, infoPort=39167, infoSecurePort=0, ipcPort=36891, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698):Failed to transfer BP-677158924-172.17.0.2-1732188596698:blk_1073741847_1030 to 127.0.0.1:44593 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:19,762 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-21T11:30:19,810 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/.tmp/info/dbef7f3fed844c7c9b78f8f7b74b4f57 2024-11-21T11:30:19,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/.tmp/info/dbef7f3fed844c7c9b78f8f7b74b4f57 as hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/dbef7f3fed844c7c9b78f8f7b74b4f57 2024-11-21T11:30:19,822 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/dbef7f3fed844c7c9b78f8f7b74b4f57, entries=1, sequenceid=45, filesize=5.9 K 2024-11-21T11:30:19,823 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 5c2371c80dbf35b182839e38745bf682 in 431ms, sequenceid=45, compaction requested=false 2024-11-21T11:30:19,824 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5c2371c80dbf35b182839e38745bf682: 2024-11-21T11:30:19,824 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-21T11:30:19,824 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:30:19,824 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/a9061a66b2494dc28215e6eab6b9b9bd because midkey is the same as first or last row 2024-11-21T11:30:19,959 WARN [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-21T11:30:19,959 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:20,007 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:30:20,010 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T11:30:20,011 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T11:30:20,011 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T11:30:20,011 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T11:30:20,011 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16a2580d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/hadoop.log.dir/,AVAILABLE} 2024-11-21T11:30:20,012 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23df32cf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T11:30:20,126 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@c1d8e25{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/java.io.tmpdir/jetty-localhost-34995-hadoop-hdfs-3_4_1-tests_jar-_-any-11585404756048849759/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:30:20,126 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5af75f98{HTTP/1.1, (http/1.1)}{localhost:34995} 2024-11-21T11:30:20,126 INFO [Time-limited test {}] server.Server(415): Started @127318ms 2024-11-21T11:30:20,128 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T11:30:20,236 WARN [Thread-982 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-21T11:30:20,245 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8d1f3ee929d10d31 with lease ID 0x2edf52267c6427b8: from storage DS-302696ae-c9bb-4355-adf8-8397d225a3aa node DatanodeRegistration(127.0.0.1:33697, datanodeUuid=ae719419-eba9-47fe-a9d9-6df1c759590a, infoPort=40925, infoSecurePort=0, ipcPort=37819, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-21T11:30:20,245 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8d1f3ee929d10d31 with lease ID 0x2edf52267c6427b8: from storage DS-2094c6fb-1734-43c9-8fe7-0b2b7b482b54 node DatanodeRegistration(127.0.0.1:33697, datanodeUuid=ae719419-eba9-47fe-a9d9-6df1c759590a, infoPort=40925, infoSecurePort=0, ipcPort=37819, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:30:20,728 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:21,748 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@408b8976[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44207, datanodeUuid=dace58ef-a230-4009-9eb1-a0895ea172a9, infoPort=39167, infoSecurePort=0, ipcPort=36891, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698):Failed to transfer BP-677158924-172.17.0.2-1732188596698:blk_1073741872_1055 to 127.0.0.1:38267 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T11:30:21,748 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4e4c6ecf[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44207, datanodeUuid=dace58ef-a230-4009-9eb1-a0895ea172a9, infoPort=39167, infoSecurePort=0, ipcPort=36891, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698):Failed to transfer BP-677158924-172.17.0.2-1732188596698:blk_1073741867_1050 to 127.0.0.1:44745 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:21,762 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:21,959 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:22,729 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:22,748 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4e4c6ecf[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44207, datanodeUuid=dace58ef-a230-4009-9eb1-a0895ea172a9, infoPort=39167, infoSecurePort=0, ipcPort=36891, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698):Failed to transfer BP-677158924-172.17.0.2-1732188596698:blk_1073741877_1060 to 127.0.0.1:38267 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:23,763 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:23,959 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:24,729 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:25,763 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:25,960 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:26,729 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:27,579 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T11:30:27,764 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:27,860 ERROR [FSHLog-0-hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData-prefix:7b462513bfc2,36257,1732188597601 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:27,860 WARN [FSHLog-0-hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData-prefix:7b462513bfc2,36257,1732188597601 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:27,860 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 7b462513bfc2%2C36257%2C1732188597601:(num 1732188597780) roll requested 2024-11-21T11:30:27,860 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C36257%2C1732188597601.1732188627860 2024-11-21T11:30:27,863 WARN [Thread-1002 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:27,864 WARN [Thread-1002 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK], DatanodeInfoWithStorage[127.0.0.1:33697,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]) is bad. 2024-11-21T11:30:27,864 WARN [Thread-1002 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741878_1061 2024-11-21T11:30:27,864 WARN [Thread-1002 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK] 2024-11-21T11:30:27,866 WARN [Thread-1002 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:27,866 WARN [Thread-1002 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK], DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK]) is bad. 2024-11-21T11:30:27,866 WARN [Thread-1002 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741879_1062 2024-11-21T11:30:27,867 WARN [Thread-1002 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44745,DS-3907e662-8bcb-4880-aabc-dfdcc227ede0,DISK] 2024-11-21T11:30:27,868 WARN [Thread-1002 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:27,868 WARN [Thread-1002 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK], DatanodeInfoWithStorage[127.0.0.1:44207,DS-a545e067-ef46-40d1-9dc5-40a9a06fc732,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]) is bad. 2024-11-21T11:30:27,868 WARN [Thread-1002 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741880_1063 2024-11-21T11:30:27,868 WARN [Thread-1002 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK] 2024-11-21T11:30:27,873 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:27,873 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:27,873 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:27,874 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:27,874 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:27,874 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/WALs/7b462513bfc2,36257,1732188597601/7b462513bfc2%2C36257%2C1732188597601.1732188597780 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/WALs/7b462513bfc2,36257,1732188597601/7b462513bfc2%2C36257%2C1732188597601.1732188627860 2024-11-21T11:30:27,874 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:27,874 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:27,875 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/WALs/7b462513bfc2,36257,1732188597601/7b462513bfc2%2C36257%2C1732188597601.1732188597780 2024-11-21T11:30:27,875 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40925:40925),(127.0.0.1/127.0.0.1:39167:39167)] 2024-11-21T11:30:27,875 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/WALs/7b462513bfc2,36257,1732188597601/7b462513bfc2%2C36257%2C1732188597601.1732188597780 is not closed yet, will try archiving it next time 2024-11-21T11:30:27,875 WARN [IPC Server handler 0 on default port 37309 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/WALs/7b462513bfc2,36257,1732188597601/7b462513bfc2%2C36257%2C1732188597601.1732188597780 has not been closed. Lease recovery is in progress. RecoveryId = 1065 for block blk_1073741830_1006 2024-11-21T11:30:27,875 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/WALs/7b462513bfc2,36257,1732188597601/7b462513bfc2%2C36257%2C1732188597601.1732188597780 after 0ms 2024-11-21T11:30:27,960 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:28,730 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-21T11:30:29,961 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:30,259 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@54a1ee7a {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-677158924-172.17.0.2-1732188596698:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:44593,null,null]) java.net.ConnectException: Call From 7b462513bfc2/172.17.0.2 to localhost:45225 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-21T11:30:30,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33697 is added to blk_1073741833_1019 (size=455) 2024-11-21T11:30:30,730 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:30,899 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188598068 to hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/oldWALs/7b462513bfc2%2C40563%2C1732188597662.1732188598068 2024-11-21T11:30:30,901 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188615908 to hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/oldWALs/7b462513bfc2%2C40563%2C1732188597662.1732188615908 2024-11-21T11:30:31,877 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/WALs/7b462513bfc2,36257,1732188597601/7b462513bfc2%2C36257%2C1732188597601.1732188597780 after 4001ms 2024-11-21T11:30:31,961 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:32,730 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:33,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741833_1019 (size=455) 2024-11-21T11:30:33,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741835_1011 (size=393) 2024-11-21T11:30:33,961 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:34,239 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6f07e091[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33697, datanodeUuid=ae719419-eba9-47fe-a9d9-6df1c759590a, infoPort=40925, infoSecurePort=0, ipcPort=37819, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698):Failed to transfer BP-677158924-172.17.0.2-1732188596698:blk_1073741831_1007 to 127.0.0.1:38267 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:34,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741829_1005 (size=34) 2024-11-21T11:30:34,731 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:35,577 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C40563%2C1732188597662.1732188635577 2024-11-21T11:30:35,583 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:35,583 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:35,583 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:35,584 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:35,584 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:35,584 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188617943 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188635577 2024-11-21T11:30:35,585 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39167:39167),(127.0.0.1/127.0.0.1:40925:40925)] 2024-11-21T11:30:35,585 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188617943 is not closed yet, will try archiving it next time 2024-11-21T11:30:35,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741862_1045 (size=13591) 2024-11-21T11:30:35,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] regionserver.HRegion(8855): Flush requested on 5c2371c80dbf35b182839e38745bf682 2024-11-21T11:30:35,598 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5c2371c80dbf35b182839e38745bf682 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-21T11:30:35,603 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/.tmp/info/77d7675ce6204984a351629610442bfe is 1080, key is row0013/info:/1732188635586/Put/seqid=0 2024-11-21T11:30:35,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741883_1067 (size=11421) 2024-11-21T11:30:35,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33697 is added to blk_1073741883_1067 (size=11421) 2024-11-21T11:30:35,610 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/.tmp/info/77d7675ce6204984a351629610442bfe 2024-11-21T11:30:35,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/.tmp/info/77d7675ce6204984a351629610442bfe as hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/77d7675ce6204984a351629610442bfe 2024-11-21T11:30:35,623 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/77d7675ce6204984a351629610442bfe, entries=6, sequenceid=55, filesize=11.2 K 2024-11-21T11:30:35,624 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for 5c2371c80dbf35b182839e38745bf682 in 26ms, sequenceid=55, compaction requested=true 2024-11-21T11:30:35,624 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5c2371c80dbf35b182839e38745bf682: 2024-11-21T11:30:35,624 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-11-21T11:30:35,624 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:30:35,625 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/a9061a66b2494dc28215e6eab6b9b9bd because midkey is the same as first or last row 2024-11-21T11:30:35,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5c2371c80dbf35b182839e38745bf682:info, priority=-2147483648, current under compaction store size is 1 2024-11-21T11:30:35,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T11:30:35,625 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 
compacting, 3 eligible, 16 blocking 2024-11-21T11:30:35,626 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T11:30:35,626 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.HStore(1541): 5c2371c80dbf35b182839e38745bf682/info is initiating minor compaction (all files) 2024-11-21T11:30:35,626 INFO [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 5c2371c80dbf35b182839e38745bf682/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682. 2024-11-21T11:30:35,626 INFO [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/a9061a66b2494dc28215e6eab6b9b9bd, hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/dbef7f3fed844c7c9b78f8f7b74b4f57, hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/77d7675ce6204984a351629610442bfe] into tmpdir=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/.tmp, totalSize=34.6 K 2024-11-21T11:30:35,627 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] compactions.Compactor(225): Compacting a9061a66b2494dc28215e6eab6b9b9bd, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732188611887 2024-11-21T11:30:35,627 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] compactions.Compactor(225): Compacting dbef7f3fed844c7c9b78f8f7b74b4f57, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732188619390 2024-11-21T11:30:35,627 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] compactions.Compactor(225): Compacting 77d7675ce6204984a351629610442bfe, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732188619796 2024-11-21T11:30:35,644 INFO [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5c2371c80dbf35b182839e38745bf682#info#compaction#24 average throughput is 17.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T11:30:35,644 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/.tmp/info/ca8fbdd1fca14c0ab043caed6b9b8b13 is 1080, key is row0002/info:/1732188611887/Put/seqid=0 2024-11-21T11:30:35,647 WARN [Thread-1032 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1068 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38267 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:35,647 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:44814 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741884_1068] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data4]'}, localName='127.0.0.1:33697', datanodeUuid='ae719419-eba9-47fe-a9d9-6df1c759590a', xmitsInProgress=0}:Exception transferring block BP-677158924-172.17.0.2-1732188596698:blk_1073741884_1068 to mirror 127.0.0.1:38267 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T11:30:35,647 WARN [Thread-1032 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741884_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33697,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK], DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]) is bad. 2024-11-21T11:30:35,648 WARN [Thread-1032 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741884_1068 2024-11-21T11:30:35,648 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:44814 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741884_1068] {}] datanode.BlockReceiver(316): Block 1073741884 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-21T11:30:35,648 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:44814 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741884_1068] {}] datanode.DataXceiver(331): 127.0.0.1:33697:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44814 dst: /127.0.0.1:33697 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T11:30:35,648 WARN [Thread-1032 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK] 2024-11-21T11:30:35,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741885_1069 (size=23502) 2024-11-21T11:30:35,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33697 is added to blk_1073741885_1069 (size=23502) 2024-11-21T11:30:35,659 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/.tmp/info/ca8fbdd1fca14c0ab043caed6b9b8b13 as hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/ca8fbdd1fca14c0ab043caed6b9b8b13 2024-11-21T11:30:35,666 INFO [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 5c2371c80dbf35b182839e38745bf682/info of 5c2371c80dbf35b182839e38745bf682 into ca8fbdd1fca14c0ab043caed6b9b8b13(size=23.0 K), total size for store is 23.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T11:30:35,666 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 5c2371c80dbf35b182839e38745bf682: 2024-11-21T11:30:35,666 INFO [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682., storeName=5c2371c80dbf35b182839e38745bf682/info, priority=13, startTime=1732188635625; duration=0sec 2024-11-21T11:30:35,666 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-21T11:30:35,666 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:30:35,666 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/ca8fbdd1fca14c0ab043caed6b9b8b13 because midkey is the same as first or last row 2024-11-21T11:30:35,667 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-21T11:30:35,667 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:30:35,667 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/ca8fbdd1fca14c0ab043caed6b9b8b13 because midkey is the same as first or last row 2024-11-21T11:30:35,667 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] 
regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-21T11:30:35,667 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:30:35,667 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/ca8fbdd1fca14c0ab043caed6b9b8b13 because midkey is the same as first or last row 2024-11-21T11:30:35,667 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T11:30:35,667 DEBUG [RS:0;7b462513bfc2:40563-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5c2371c80dbf35b182839e38745bf682:info 2024-11-21T11:30:35,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] regionserver.HRegion(8855): Flush requested on 5c2371c80dbf35b182839e38745bf682 2024-11-21T11:30:35,815 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 5c2371c80dbf35b182839e38745bf682 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-21T11:30:35,820 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/.tmp/info/bd7eaab8dc2b43ffaf30bfb2a85ee90f is 1080, key is row0018/info:/1732188635599/Put/seqid=0 2024-11-21T11:30:35,822 WARN [Thread-1039 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1070 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:35,822 WARN [Thread-1039 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741886_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK], DatanodeInfoWithStorage[127.0.0.1:44207,DS-a545e067-ef46-40d1-9dc5-40a9a06fc732,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]) is bad. 
2024-11-21T11:30:35,822 WARN [Thread-1039 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741886_1070 2024-11-21T11:30:35,823 WARN [Thread-1039 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK] 2024-11-21T11:30:35,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741887_1071 (size=11421) 2024-11-21T11:30:35,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33697 is added to blk_1073741887_1071 (size=11421) 2024-11-21T11:30:35,829 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/.tmp/info/bd7eaab8dc2b43ffaf30bfb2a85ee90f 2024-11-21T11:30:35,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/.tmp/info/bd7eaab8dc2b43ffaf30bfb2a85ee90f as hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/bd7eaab8dc2b43ffaf30bfb2a85ee90f 2024-11-21T11:30:35,843 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/bd7eaab8dc2b43ffaf30bfb2a85ee90f, entries=6, sequenceid=66, filesize=11.2 K 2024-11-21T11:30:35,844 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 5c2371c80dbf35b182839e38745bf682 in 29ms, sequenceid=66, compaction requested=false 2024-11-21T11:30:35,844 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 5c2371c80dbf35b182839e38745bf682: 2024-11-21T11:30:35,844 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.1 K, sizeToCheck=16.0 K 2024-11-21T11:30:35,844 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:30:35,844 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/ca8fbdd1fca14c0ab043caed6b9b8b13 because midkey is the same as first or last row 2024-11-21T11:30:35,962 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-21T11:30:35,962 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:35,986 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.1732188617943 to hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/oldWALs/7b462513bfc2%2C40563%2C1732188597662.1732188617943 2024-11-21T11:30:36,016 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T11:30:36,016 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T11:30:36,016 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at 
org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T11:30:36,016 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:30:36,017 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:30:36,017 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T11:30:36,017 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T11:30:36,017 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1863833071, stopped=false 2024-11-21T11:30:36,017 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7b462513bfc2,36257,1732188597601 2024-11-21T11:30:36,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40563-0x1013a4a57de0001, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-21T11:30:36,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39231-0x1013a4a57de0002, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-21T11:30:36,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40563-0x1013a4a57de0001, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:30:36,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39231-0x1013a4a57de0002, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:30:36,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-21T11:30:36,019 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T11:30:36,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:30:36,019 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
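The call stack above comes from AbstractTestLogRolling.tearDown invoking HBaseTestingUtil.shutdownMiniCluster. A stripped-down sketch of that setup/teardown pairing, assuming the hbase-testing-util dependency and JUnit 4; this is the shape of such a test, not the actual AbstractTestLogRolling class:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;

    public class MiniClusterLifecycleSketch {
      private final HBaseTestingUtil util = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        // Starts an in-process master, region server(s), mini DFS and mini ZooKeeper.
        util.startMiniCluster();
      }

      @After
      public void tearDown() throws Exception {
        // Closes the shared async connection, stops region servers and the master,
        // then tears down the backing mini DFS/ZK, matching the call stack above.
        util.shutdownMiniCluster();
      }
    }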
2024-11-21T11:30:36,019 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T11:30:36,019 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:30:36,020 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:39231-0x1013a4a57de0002, quorum=127.0.0.1:52013, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:30:36,020 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7b462513bfc2,40563,1732188597662' ***** 2024-11-21T11:30:36,020 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:30:36,020 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40563-0x1013a4a57de0001, quorum=127.0.0.1:52013, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:30:36,020 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T11:30:36,020 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7b462513bfc2,39231,1732188598648' ***** 2024-11-21T11:30:36,020 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T11:30:36,020 INFO [RS:0;7b462513bfc2:40563 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T11:30:36,020 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T11:30:36,020 INFO [RS:0;7b462513bfc2:40563 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T11:30:36,020 INFO [RS:1;7b462513bfc2:39231 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T11:30:36,020 INFO [RS:0;7b462513bfc2:40563 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T11:30:36,021 INFO [RS:1;7b462513bfc2:39231 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T11:30:36,021 INFO [RS:0;7b462513bfc2:40563 {}] regionserver.HRegionServer(3091): Received CLOSE for 5c2371c80dbf35b182839e38745bf682 2024-11-21T11:30:36,021 INFO [RS:1;7b462513bfc2:39231 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T11:30:36,021 INFO [RS:1;7b462513bfc2:39231 {}] regionserver.HRegionServer(959): stopping server 7b462513bfc2,39231,1732188598648 2024-11-21T11:30:36,021 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T11:30:36,021 INFO [RS:1;7b462513bfc2:39231 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T11:30:36,021 INFO [RS:1;7b462513bfc2:39231 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;7b462513bfc2:39231. 
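The ZKWatcher entries above record the /hbase/running znode being deleted on quorum 127.0.0.1:52013, which is how the master signals shutdown to the region servers; each server then re-arms a watch on the now-missing znode. A bare ZooKeeper-client sketch of that pattern, using the raw org.apache.zookeeper API rather than HBase's ZKWatcher:

    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:52013", 30_000, event -> {
          if (event.getType() == Watcher.Event.EventType.NodeDeleted
              && "/hbase/running".equals(event.getPath())) {
            System.out.println("cluster shutdown requested");
          }
        });
        // Arms a watch even though the znode may not exist yet, matching the
        // "Set watcher on znode that does not yet exist" DEBUG lines above.
        zk.exists("/hbase/running", true);
      }
    }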
2024-11-21T11:30:36,021 DEBUG [RS:1;7b462513bfc2:39231 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T11:30:36,021 DEBUG [RS:1;7b462513bfc2:39231 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:30:36,021 INFO [RS:0;7b462513bfc2:40563 {}] regionserver.HRegionServer(959): stopping server 7b462513bfc2,40563,1732188597662 2024-11-21T11:30:36,021 INFO [RS:1;7b462513bfc2:39231 {}] regionserver.HRegionServer(976): stopping server 7b462513bfc2,39231,1732188598648; all regions closed. 2024-11-21T11:30:36,021 INFO [RS:0;7b462513bfc2:40563 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T11:30:36,021 INFO [RS:0;7b462513bfc2:40563 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7b462513bfc2:40563. 
2024-11-21T11:30:36,021 DEBUG [RS:0;7b462513bfc2:40563 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T11:30:36,021 DEBUG [RS:0;7b462513bfc2:40563 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:30:36,021 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 5c2371c80dbf35b182839e38745bf682, disabling compactions & flushes 2024-11-21T11:30:36,021 INFO [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682. 2024-11-21T11:30:36,021 INFO [RS:0;7b462513bfc2:40563 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T11:30:36,021 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682. 2024-11-21T11:30:36,021 INFO [RS:0;7b462513bfc2:40563 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T11:30:36,021 INFO [RS:0;7b462513bfc2:40563 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-21T11:30:36,022 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682. after waiting 0 ms 2024-11-21T11:30:36,022 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:36,022 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682. 
2024-11-21T11:30:36,022 INFO [RS:0;7b462513bfc2:40563 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T11:30:36,022 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:36,022 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:36,022 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:36,022 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:36,022 INFO [RS:0;7b462513bfc2:40563 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-21T11:30:36,022 DEBUG [RS:0;7b462513bfc2:40563 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 5c2371c80dbf35b182839e38745bf682=TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682.} 2024-11-21T11:30:36,022 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/7cfbc91bbd3f45ba8005162e66c70bf1, hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/379d8d4d309e4645b4305f3d55bcf28e, hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/a9061a66b2494dc28215e6eab6b9b9bd, hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/f457d234a5fd420b8630039ccbf88cc1, hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/dbef7f3fed844c7c9b78f8f7b74b4f57, hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/77d7675ce6204984a351629610442bfe] to archive 2024-11-21T11:30:36,022 DEBUG [RS:0;7b462513bfc2:40563 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 5c2371c80dbf35b182839e38745bf682 2024-11-21T11:30:36,022 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T11:30:36,022 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T11:30:36,022 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T11:30:36,022 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T11:30:36,022 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:36,022 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T11:30:36,023 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:36,023 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-21T11:30:36,023 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 2024-11-21T11:30:36,023 WARN [IPC Server handler 2 on default port 37309 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 has not been closed. Lease recovery is in progress. RecoveryId = 1072 for block blk_1073741837_1013 2024-11-21T11:30:36,023 ERROR [FSHLog-0-hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647-prefix:7b462513bfc2,40563,1732188597662.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
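The Recover lease / "Lease recovery is in progress" pair above shows the old WAL for 7b462513bfc2,39231,1732188598648 still holding its HDFS lease after the writer died, with RecoverLeaseFSUtils retrying until the file can be closed. The underlying HDFS call is DistributedFileSystem.recoverLease(Path); a minimal sketch of it, assuming the mini cluster's NameNode is reachable (illustrative only, not the HBase utility itself):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class WalLeaseRecoverySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:37309");
        // WAL path copied from the "Recover lease on dfs file" entry above.
        Path wal = new Path("/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/"
            + "WALs/7b462513bfc2,39231,1732188598648/"
            + "7b462513bfc2%2C39231%2C1732188598648.1732188598878");
        try (DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf)) {
          // recoverLease returns false while recovery is still in progress, which is what
          // the "Failed to recover lease, attempt=0" INFO above corresponds to; callers
          // retry with backoff until it returns true and the file is closed.
          boolean closed = dfs.recoverLease(wal);
          System.out.println("WAL closed: " + closed);
        }
      }
    }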
2024-11-21T11:30:36,023 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-21T11:30:36,023 WARN [FSHLog-0-hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647-prefix:7b462513bfc2,40563,1732188597662.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:36,023 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 after 0ms 2024-11-21T11:30:36,023 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7b462513bfc2%2C40563%2C1732188597662.meta:.meta(num 1732188598486) roll requested 2024-11-21T11:30:36,024 INFO [regionserver/7b462513bfc2:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C40563%2C1732188597662.meta.1732188636024.meta 2024-11-21T11:30:36,026 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/7cfbc91bbd3f45ba8005162e66c70bf1 to hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/7cfbc91bbd3f45ba8005162e66c70bf1 2024-11-21T11:30:36,026 WARN [Thread-1045 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1073 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-21T11:30:36,026 WARN [Thread-1045 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741888_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK], DatanodeInfoWithStorage[127.0.0.1:33697,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]) is bad. 2024-11-21T11:30:36,027 WARN [Thread-1045 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741888_1073 2024-11-21T11:30:36,027 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/379d8d4d309e4645b4305f3d55bcf28e to hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/379d8d4d309e4645b4305f3d55bcf28e 2024-11-21T11:30:36,027 WARN [Thread-1045 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK] 2024-11-21T11:30:36,028 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/a9061a66b2494dc28215e6eab6b9b9bd to hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/a9061a66b2494dc28215e6eab6b9b9bd 2024-11-21T11:30:36,030 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/f457d234a5fd420b8630039ccbf88cc1 to hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/f457d234a5fd420b8630039ccbf88cc1 2024-11-21T11:30:36,031 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/dbef7f3fed844c7c9b78f8f7b74b4f57 to hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/dbef7f3fed844c7c9b78f8f7b74b4f57 2024-11-21T11:30:36,032 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/77d7675ce6204984a351629610442bfe to hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/info/77d7675ce6204984a351629610442bfe 2024-11-21T11:30:36,033 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=7b462513bfc2:36257 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-21T11:30:36,033 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [7cfbc91bbd3f45ba8005162e66c70bf1=10347, 379d8d4d309e4645b4305f3d55bcf28e=12506, a9061a66b2494dc28215e6eab6b9b9bd=17994, f457d234a5fd420b8630039ccbf88cc1=6027, dbef7f3fed844c7c9b78f8f7b74b4f57=6027, 77d7675ce6204984a351629610442bfe=11421] 2024-11-21T11:30:36,038 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:36,038 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:36,038 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:36,038 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:36,038 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:36,038 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188636024.meta 2024-11-21T11:30:36,039 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:36,039 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44593,DS-0e147be5-8c67-4fb3-8660-053a2b6d764e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:36,039 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta 2024-11-21T11:30:36,039 WARN [IPC Server handler 4 on default port 37309 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta has not been closed. Lease recovery is in progress. RecoveryId = 1075 for block blk_1073741834_1010 2024-11-21T11:30:36,039 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta after 0ms 2024-11-21T11:30:36,042 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/default/TestLogRolling-testLogRollOnDatanodeDeath/5c2371c80dbf35b182839e38745bf682/recovered.edits/69.seqid, newMaxSeqId=69, maxSeqId=1 2024-11-21T11:30:36,043 INFO [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682. 2024-11-21T11:30:36,043 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 5c2371c80dbf35b182839e38745bf682: Waiting for close lock at 1732188636021Running coprocessor pre-close hooks at 1732188636021Disabling compacts and flushes for region at 1732188636021Disabling writes for close at 1732188636022 (+1 ms)Writing region close event to WAL at 1732188636037 (+15 ms)Running coprocessor post-close hooks at 1732188636043 (+6 ms)Closed at 1732188636043 2024-11-21T11:30:36,043 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682. 
2024-11-21T11:30:36,044 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39167:39167),(127.0.0.1/127.0.0.1:40925:40925)] 2024-11-21T11:30:36,044 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta is not closed yet, will try archiving it next time 2024-11-21T11:30:36,060 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/hbase/meta/1588230740/.tmp/info/92a2b07d51d3454684d1969edf4818b9 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1732188598777.5c2371c80dbf35b182839e38745bf682./info:regioninfo/1732188599155/Put/seqid=0 2024-11-21T11:30:36,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741890_1076 (size=7089) 2024-11-21T11:30:36,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33697 is added to blk_1073741890_1076 (size=7089) 2024-11-21T11:30:36,066 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/hbase/meta/1588230740/.tmp/info/92a2b07d51d3454684d1969edf4818b9 2024-11-21T11:30:36,085 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/hbase/meta/1588230740/.tmp/ns/34808703c6f4437b8c16ee4b1bebe10f is 43, key is default/ns:d/1732188598545/Put/seqid=0 2024-11-21T11:30:36,088 WARN [Thread-1058 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1077 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38267 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
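The meta WAL roll recorded above (old writer 7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta replaced by ...meta.1732188636024.meta, with a new two-node pipeline) was triggered automatically by the low-replication roller once the pipeline degraded. The same roll can also be requested explicitly through the Admin API; a sketch assuming a client configuration that points at this cluster:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Server name in host,port,startcode form, taken from the log above.
          admin.rollWALWriter(ServerName.valueOf("7b462513bfc2,40563,1732188597662"));
        }
      }
    }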
2024-11-21T11:30:36,088 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:44864 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741891_1077] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data4]'}, localName='127.0.0.1:33697', datanodeUuid='ae719419-eba9-47fe-a9d9-6df1c759590a', xmitsInProgress=0}:Exception transferring block BP-677158924-172.17.0.2-1732188596698:blk_1073741891_1077 to mirror 127.0.0.1:38267 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:36,088 WARN [Thread-1058 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741891_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33697,DS-302696ae-c9bb-4355-adf8-8397d225a3aa,DISK], DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]) is bad. 2024-11-21T11:30:36,088 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:44864 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741891_1077] {}] datanode.BlockReceiver(316): Block 1073741891 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-21T11:30:36,088 WARN [Thread-1058 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741891_1077 2024-11-21T11:30:36,088 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:44864 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741891_1077] {}] datanode.DataXceiver(331): 127.0.0.1:33697:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44864 dst: /127.0.0.1:33697 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:36,088 WARN [Thread-1058 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK] 2024-11-21T11:30:36,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33697 is added to blk_1073741892_1078 (size=5153) 2024-11-21T11:30:36,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741892_1078 (size=5153) 2024-11-21T11:30:36,094 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/hbase/meta/1588230740/.tmp/ns/34808703c6f4437b8c16ee4b1bebe10f 2024-11-21T11:30:36,114 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/hbase/meta/1588230740/.tmp/table/797624ae94e44c39a51ca27325eca761 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1732188599169/Put/seqid=0 2024-11-21T11:30:36,116 WARN [Thread-1065 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1079 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38267 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-21T11:30:36,116 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:45476 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741893_1079] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data10]'}, localName='127.0.0.1:44207', datanodeUuid='dace58ef-a230-4009-9eb1-a0895ea172a9', xmitsInProgress=0}:Exception transferring block BP-677158924-172.17.0.2-1732188596698:blk_1073741893_1079 to mirror 127.0.0.1:38267 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:36,117 WARN [Thread-1065 {}] hdfs.DataStreamer(1731): Error Recovery for BP-677158924-172.17.0.2-1732188596698:blk_1073741893_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44207,DS-a545e067-ef46-40d1-9dc5-40a9a06fc732,DISK], DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK]) is bad. 2024-11-21T11:30:36,117 WARN [Thread-1065 {}] hdfs.DataStreamer(1850): Abandoning BP-677158924-172.17.0.2-1732188596698:blk_1073741893_1079 2024-11-21T11:30:36,117 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:45476 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741893_1079] {}] datanode.BlockReceiver(316): Block 1073741893 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-21T11:30:36,117 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_771584330_22 at /127.0.0.1:45476 [Receiving block BP-677158924-172.17.0.2-1732188596698:blk_1073741893_1079] {}] datanode.DataXceiver(331): 127.0.0.1:44207:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45476 dst: /127.0.0.1:44207 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:36,117 WARN [Thread-1065 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38267,DS-83832c2b-b799-4173-8bc4-8704e1ddd612,DISK] 2024-11-21T11:30:36,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33697 is added to blk_1073741894_1080 (size=5424) 2024-11-21T11:30:36,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741894_1080 (size=5424) 2024-11-21T11:30:36,127 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/hbase/meta/1588230740/.tmp/table/797624ae94e44c39a51ca27325eca761 2024-11-21T11:30:36,135 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/hbase/meta/1588230740/.tmp/info/92a2b07d51d3454684d1969edf4818b9 as hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/hbase/meta/1588230740/info/92a2b07d51d3454684d1969edf4818b9 2024-11-21T11:30:36,142 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/hbase/meta/1588230740/info/92a2b07d51d3454684d1969edf4818b9, entries=10, sequenceid=11, filesize=6.9 K 2024-11-21T11:30:36,143 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/hbase/meta/1588230740/.tmp/ns/34808703c6f4437b8c16ee4b1bebe10f as hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/hbase/meta/1588230740/ns/34808703c6f4437b8c16ee4b1bebe10f 2024-11-21T11:30:36,150 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/hbase/meta/1588230740/ns/34808703c6f4437b8c16ee4b1bebe10f, entries=2, sequenceid=11, filesize=5.0 K 2024-11-21T11:30:36,151 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/hbase/meta/1588230740/.tmp/table/797624ae94e44c39a51ca27325eca761 as hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/hbase/meta/1588230740/table/797624ae94e44c39a51ca27325eca761 
2024-11-21T11:30:36,157 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/hbase/meta/1588230740/table/797624ae94e44c39a51ca27325eca761, entries=2, sequenceid=11, filesize=5.3 K 2024-11-21T11:30:36,158 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 135ms, sequenceid=11, compaction requested=false 2024-11-21T11:30:36,163 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-21T11:30:36,164 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T11:30:36,164 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T11:30:36,164 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732188636022Running coprocessor pre-close hooks at 1732188636022Disabling compacts and flushes for region at 1732188636022Disabling writes for close at 1732188636022Obtaining lock to block concurrent updates at 1732188636023 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732188636023Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1732188636023Flushing stores of hbase:meta,,1.1588230740 at 1732188636045 (+22 ms)Flushing 1588230740/info: creating writer at 1732188636045Flushing 1588230740/info: appending metadata at 1732188636060 (+15 ms)Flushing 1588230740/info: closing flushed file at 1732188636060Flushing 1588230740/ns: creating writer at 1732188636071 (+11 ms)Flushing 1588230740/ns: appending metadata at 1732188636085 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732188636085Flushing 1588230740/table: creating writer at 1732188636100 (+15 ms)Flushing 1588230740/table: appending metadata at 1732188636113 (+13 ms)Flushing 1588230740/table: closing flushed file at 1732188636113Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1255aa8c: reopening flushed file at 1732188636133 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@46ad5fa6: reopening flushed file at 1732188636142 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b16f1b7: reopening flushed file at 1732188636150 (+8 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 135ms, sequenceid=11, compaction requested=false at 1732188636158 (+8 ms)Writing region close event to WAL at 1732188636159 (+1 ms)Running coprocessor post-close hooks at 1732188636164 (+5 ms)Closed at 1732188636164 2024-11-21T11:30:36,164 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T11:30:36,222 INFO 
[RS:0;7b462513bfc2:40563 {}] regionserver.HRegionServer(976): stopping server 7b462513bfc2,40563,1732188597662; all regions closed. 2024-11-21T11:30:36,223 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:36,223 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:36,223 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:36,223 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:36,223 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:36,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33697 is added to blk_1073741889_1074 (size=825) 2024-11-21T11:30:36,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741889_1074 (size=825) 2024-11-21T11:30:36,732 INFO [regionserver/7b462513bfc2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T11:30:36,750 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4e4c6ecf[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44207, datanodeUuid=dace58ef-a230-4009-9eb1-a0895ea172a9, infoPort=39167, infoSecurePort=0, ipcPort=36891, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698):Failed to transfer BP-677158924-172.17.0.2-1732188596698:blk_1073741862_1045 to 127.0.0.1:38267 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:36,790 INFO [regionserver/7b462513bfc2:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-21T11:30:36,790 INFO [regionserver/7b462513bfc2:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-21T11:30:36,929 INFO [regionserver/7b462513bfc2:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-21T11:30:36,930 INFO [regionserver/7b462513bfc2:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-21T11:30:37,240 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@65bf7c3e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33697, datanodeUuid=ae719419-eba9-47fe-a9d9-6df1c759590a, infoPort=40925, infoSecurePort=0, ipcPort=37819, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698):Failed to transfer BP-677158924-172.17.0.2-1732188596698:blk_1073741836_1012 to 127.0.0.1:38267 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:37,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741832_1008 (size=32) 2024-11-21T11:30:37,931 INFO [regionserver/7b462513bfc2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T11:30:38,240 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6f07e091[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33697, datanodeUuid=ae719419-eba9-47fe-a9d9-6df1c759590a, infoPort=40925, infoSecurePort=0, ipcPort=37819, storageInfo=lv=-57;cid=testClusterID;nsid=962724275;c=1732188596698):Failed to transfer BP-677158924-172.17.0.2-1732188596698:blk_1073741828_1004 to 127.0.0.1:38267 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:38,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741826_1002 (size=42) 2024-11-21T11:30:38,293 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-21T11:30:38,294 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T11:30:38,294 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-21T11:30:38,661 INFO [master/7b462513bfc2:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-21T11:30:38,661 INFO [master/7b462513bfc2:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-21T11:30:39,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741827_1003 (size=196) 2024-11-21T11:30:39,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741825_1001 (size=7) 2024-11-21T11:30:40,025 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 after 4001ms 2024-11-21T11:30:40,040 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta after 4001ms 2024-11-21T11:30:40,263 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@17f32c6d {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-677158924-172.17.0.2-1732188596698:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:44593,null,null]) java.net.ConnectException: Call From 7b462513bfc2/172.17.0.2 to localhost:45225 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-21T11:30:41,023 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-21T11:30:41,025 DEBUG [RS:1;7b462513bfc2:39231 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/oldWALs 2024-11-21T11:30:41,025 INFO [RS:1;7b462513bfc2:39231 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7b462513bfc2%2C39231%2C1732188598648:(num 1732188598878) 2024-11-21T11:30:41,025 DEBUG [RS:1;7b462513bfc2:39231 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:30:41,025 INFO [RS:1;7b462513bfc2:39231 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T11:30:41,025 INFO [RS:1;7b462513bfc2:39231 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T11:30:41,025 INFO [RS:1;7b462513bfc2:39231 {}] hbase.ChoreService(370): Chore service for: regionserver/7b462513bfc2:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-21T11:30:41,025 INFO [RS:1;7b462513bfc2:39231 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T11:30:41,025 INFO [RS:1;7b462513bfc2:39231 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T11:30:41,025 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T11:30:41,025 INFO [RS:1;7b462513bfc2:39231 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-21T11:30:41,026 INFO [RS:1;7b462513bfc2:39231 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T11:30:41,026 INFO [RS:1;7b462513bfc2:39231 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39231 2024-11-21T11:30:41,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39231-0x1013a4a57de0002, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7b462513bfc2,39231,1732188598648 2024-11-21T11:30:41,028 INFO [RS:1;7b462513bfc2:39231 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T11:30:41,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-21T11:30:41,029 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7b462513bfc2,39231,1732188598648] 2024-11-21T11:30:41,030 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:41,031 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7b462513bfc2,39231,1732188598648 already deleted, retry=false 2024-11-21T11:30:41,031 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7b462513bfc2,39231,1732188598648 expired; onlineServers=1 2024-11-21T11:30:41,044 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:41,057 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:41,057 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:41,058 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:41,058 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:41,058 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:41,064 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:41,065 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:41,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39231-0x1013a4a57de0002, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:30:41,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39231-0x1013a4a57de0002, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:30:41,129 INFO [RS:1;7b462513bfc2:39231 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T11:30:41,129 INFO [RS:1;7b462513bfc2:39231 {}] regionserver.HRegionServer(1031): Exiting; stopping=7b462513bfc2,39231,1732188598648; zookeeper connection closed. 2024-11-21T11:30:41,130 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7b28ebcf {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7b28ebcf 2024-11-21T11:30:41,224 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-21T11:30:41,227 DEBUG [RS:0;7b462513bfc2:40563 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/oldWALs 2024-11-21T11:30:41,227 INFO [RS:0;7b462513bfc2:40563 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7b462513bfc2%2C40563%2C1732188597662.meta:.meta(num 1732188636024) 2024-11-21T11:30:41,228 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:41,228 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:41,228 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:41,228 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:41,228 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:41,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33697 is added to blk_1073741882_1066 (size=16308) 2024-11-21T11:30:41,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741882_1066 (size=16308) 2024-11-21T11:30:41,232 DEBUG [RS:0;7b462513bfc2:40563 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/oldWALs 2024-11-21T11:30:41,232 INFO [RS:0;7b462513bfc2:40563 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7b462513bfc2%2C40563%2C1732188597662:(num 1732188635577) 2024-11-21T11:30:41,232 DEBUG [RS:0;7b462513bfc2:40563 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:30:41,232 INFO [RS:0;7b462513bfc2:40563 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T11:30:41,232 INFO [RS:0;7b462513bfc2:40563 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T11:30:41,232 INFO [RS:0;7b462513bfc2:40563 {}] hbase.ChoreService(370): Chore service for: regionserver/7b462513bfc2:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore 
name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-21T11:30:41,233 INFO [RS:0;7b462513bfc2:40563 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T11:30:41,233 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T11:30:41,233 INFO [RS:0;7b462513bfc2:40563 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40563 2024-11-21T11:30:41,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40563-0x1013a4a57de0001, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7b462513bfc2,40563,1732188597662 2024-11-21T11:30:41,235 INFO [RS:0;7b462513bfc2:40563 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T11:30:41,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-21T11:30:41,235 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7b462513bfc2,40563,1732188597662] 2024-11-21T11:30:41,237 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7b462513bfc2,40563,1732188597662 already deleted, retry=false 2024-11-21T11:30:41,237 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7b462513bfc2,40563,1732188597662 expired; onlineServers=0 2024-11-21T11:30:41,237 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7b462513bfc2,36257,1732188597601' ***** 2024-11-21T11:30:41,237 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T11:30:41,237 INFO [M:0;7b462513bfc2:36257 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T11:30:41,237 INFO [M:0;7b462513bfc2:36257 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T11:30:41,237 DEBUG [M:0;7b462513bfc2:36257 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T11:30:41,237 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-21T11:30:41,237 DEBUG [M:0;7b462513bfc2:36257 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T11:30:41,237 DEBUG [master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.large.0-1732188597861 {}] cleaner.HFileCleaner(306): Exit Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.large.0-1732188597861,5,FailOnTimeoutGroup] 2024-11-21T11:30:41,237 DEBUG [master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.small.0-1732188597861 {}] cleaner.HFileCleaner(306): Exit Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.small.0-1732188597861,5,FailOnTimeoutGroup] 2024-11-21T11:30:41,238 INFO [M:0;7b462513bfc2:36257 {}] hbase.ChoreService(370): Chore service for: master/7b462513bfc2:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T11:30:41,238 INFO [M:0;7b462513bfc2:36257 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T11:30:41,238 DEBUG [M:0;7b462513bfc2:36257 {}] master.HMaster(1795): Stopping service threads 2024-11-21T11:30:41,238 INFO [M:0;7b462513bfc2:36257 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T11:30:41,238 INFO [M:0;7b462513bfc2:36257 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T11:30:41,238 INFO [M:0;7b462513bfc2:36257 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T11:30:41,238 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-21T11:30:41,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-21T11:30:41,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:30:41,240 DEBUG [M:0;7b462513bfc2:36257 {}] zookeeper.ZKUtil(347): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-21T11:30:41,240 WARN [M:0;7b462513bfc2:36257 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-21T11:30:41,240 INFO [M:0;7b462513bfc2:36257 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/.lastflushedseqids 2024-11-21T11:30:41,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741895_1081 (size=130) 2024-11-21T11:30:41,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33697 is added to blk_1073741895_1081 (size=130) 2024-11-21T11:30:41,248 INFO [M:0;7b462513bfc2:36257 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T11:30:41,248 INFO [M:0;7b462513bfc2:36257 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T11:30:41,248 DEBUG [M:0;7b462513bfc2:36257 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T11:30:41,248 INFO [M:0;7b462513bfc2:36257 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:30:41,248 DEBUG [M:0;7b462513bfc2:36257 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:30:41,248 DEBUG [M:0;7b462513bfc2:36257 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T11:30:41,248 DEBUG [M:0;7b462513bfc2:36257 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:30:41,248 INFO [M:0;7b462513bfc2:36257 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-21T11:30:41,264 DEBUG [M:0;7b462513bfc2:36257 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/56a8ec65403e4bfda72a1030c30ff39a is 82, key is hbase:meta,,1/info:regioninfo/1732188598521/Put/seqid=0 2024-11-21T11:30:41,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33697 is added to blk_1073741896_1082 (size=5672) 2024-11-21T11:30:41,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741896_1082 (size=5672) 2024-11-21T11:30:41,270 INFO [M:0;7b462513bfc2:36257 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/56a8ec65403e4bfda72a1030c30ff39a 2024-11-21T11:30:41,290 DEBUG [M:0;7b462513bfc2:36257 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/df45c556871f44e8b9516799f325e4d4 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732188599178/Put/seqid=0 2024-11-21T11:30:41,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33697 is added to blk_1073741897_1083 (size=6255) 2024-11-21T11:30:41,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741897_1083 (size=6255) 2024-11-21T11:30:41,296 INFO [M:0;7b462513bfc2:36257 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/df45c556871f44e8b9516799f325e4d4 2024-11-21T11:30:41,300 INFO [M:0;7b462513bfc2:36257 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for df45c556871f44e8b9516799f325e4d4 2024-11-21T11:30:41,314 DEBUG [M:0;7b462513bfc2:36257 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b33044c5244345759c828900e6a70de1 is 69, key is 7b462513bfc2,39231,1732188598648/rs:state/1732188598697/Put/seqid=0 2024-11-21T11:30:41,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741898_1084 (size=5224) 2024-11-21T11:30:41,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33697 is added to blk_1073741898_1084 (size=5224) 2024-11-21T11:30:41,320 INFO [M:0;7b462513bfc2:36257 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b33044c5244345759c828900e6a70de1 2024-11-21T11:30:41,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40563-0x1013a4a57de0001, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:30:41,336 INFO [RS:0;7b462513bfc2:40563 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T11:30:41,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40563-0x1013a4a57de0001, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:30:41,336 INFO [RS:0;7b462513bfc2:40563 {}] regionserver.HRegionServer(1031): Exiting; stopping=7b462513bfc2,40563,1732188597662; zookeeper connection closed. 2024-11-21T11:30:41,336 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@56f58c95 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@56f58c95 2024-11-21T11:30:41,337 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-21T11:30:41,344 DEBUG [M:0;7b462513bfc2:36257 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1510ecab7a494513b4a8e9b1c4d89137 is 52, key is load_balancer_on/state:d/1732188598596/Put/seqid=0 2024-11-21T11:30:41,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33697 is added to blk_1073741899_1085 (size=5056) 2024-11-21T11:30:41,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741899_1085 (size=5056) 2024-11-21T11:30:41,349 INFO [M:0;7b462513bfc2:36257 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1510ecab7a494513b4a8e9b1c4d89137 2024-11-21T11:30:41,355 DEBUG [M:0;7b462513bfc2:36257 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/56a8ec65403e4bfda72a1030c30ff39a as 
hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/56a8ec65403e4bfda72a1030c30ff39a 2024-11-21T11:30:41,360 INFO [M:0;7b462513bfc2:36257 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/56a8ec65403e4bfda72a1030c30ff39a, entries=8, sequenceid=60, filesize=5.5 K 2024-11-21T11:30:41,361 DEBUG [M:0;7b462513bfc2:36257 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/df45c556871f44e8b9516799f325e4d4 as hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/df45c556871f44e8b9516799f325e4d4 2024-11-21T11:30:41,365 INFO [M:0;7b462513bfc2:36257 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for df45c556871f44e8b9516799f325e4d4 2024-11-21T11:30:41,365 INFO [M:0;7b462513bfc2:36257 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/df45c556871f44e8b9516799f325e4d4, entries=6, sequenceid=60, filesize=6.1 K 2024-11-21T11:30:41,366 DEBUG [M:0;7b462513bfc2:36257 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b33044c5244345759c828900e6a70de1 as hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b33044c5244345759c828900e6a70de1 2024-11-21T11:30:41,371 INFO [M:0;7b462513bfc2:36257 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b33044c5244345759c828900e6a70de1, entries=2, sequenceid=60, filesize=5.1 K 2024-11-21T11:30:41,372 DEBUG [M:0;7b462513bfc2:36257 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1510ecab7a494513b4a8e9b1c4d89137 as hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1510ecab7a494513b4a8e9b1c4d89137 2024-11-21T11:30:41,376 INFO [M:0;7b462513bfc2:36257 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1510ecab7a494513b4a8e9b1c4d89137, entries=1, sequenceid=60, filesize=4.9 K 2024-11-21T11:30:41,377 INFO [M:0;7b462513bfc2:36257 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=60, compaction requested=false 2024-11-21T11:30:41,379 INFO [M:0;7b462513bfc2:36257 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-21T11:30:41,379 DEBUG [M:0;7b462513bfc2:36257 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732188641248Disabling compacts and flushes for region at 1732188641248Disabling writes for close at 1732188641248Obtaining lock to block concurrent updates at 1732188641248Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732188641248Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1732188641249 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732188641249Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732188641249Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732188641264 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732188641264Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732188641275 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732188641290 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732188641290Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732188641300 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732188641314 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732188641314Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732188641326 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732188641343 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732188641343Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@27be7c65: reopening flushed file at 1732188641355 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4e0a9ee9: reopening flushed file at 1732188641360 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2fcd6d42: reopening flushed file at 1732188641365 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4e378a4f: reopening flushed file at 1732188641371 (+6 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=60, compaction requested=false at 1732188641377 (+6 ms)Writing region close event to WAL at 1732188641379 (+2 ms)Closed at 1732188641379 2024-11-21T11:30:41,379 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:41,380 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:41,380 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:41,380 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:41,380 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:41,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44207 is added to blk_1073741881_1064 (size=1045) 2024-11-21T11:30:41,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33697 is added to blk_1073741881_1064 (size=1045) 2024-11-21T11:30:41,382 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-21T11:30:41,383 INFO [M:0;7b462513bfc2:36257 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-21T11:30:41,383 INFO [M:0;7b462513bfc2:36257 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36257 2024-11-21T11:30:41,383 INFO [M:0;7b462513bfc2:36257 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T11:30:41,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:30:41,485 INFO [M:0;7b462513bfc2:36257 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T11:30:41,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36257-0x1013a4a57de0000, quorum=127.0.0.1:52013, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:30:41,487 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@c1d8e25{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:30:41,488 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5af75f98{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:30:41,488 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:30:41,488 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23df32cf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:30:41,488 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16a2580d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/hadoop.log.dir/,STOPPED} 2024-11-21T11:30:41,489 WARN [BP-677158924-172.17.0.2-1732188596698 heartbeating to localhost/127.0.0.1:37309 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T11:30:41,489 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-21T11:30:41,489 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T11:30:41,489 WARN [BP-677158924-172.17.0.2-1732188596698 heartbeating to localhost/127.0.0.1:37309 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-677158924-172.17.0.2-1732188596698 (Datanode Uuid ae719419-eba9-47fe-a9d9-6df1c759590a) service to localhost/127.0.0.1:37309 2024-11-21T11:30:41,489 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@348e1135 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-677158924-172.17.0.2-1732188596698:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:44593,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:45225 , LocalHost:localPort 7b462513bfc2/172.17.0.2:0. 
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-21T11:30:41,490 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@348e1135 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-677158924-172.17.0.2-1732188596698:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:33697,null,null], DatanodeInfoWithStorage[127.0.0.1:44593,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-677158924-172.17.0.2-1732188596698 2024-11-21T11:30:41,490 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@348e1135 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-677158924-172.17.0.2-1732188596698:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:33697,null,null]) java.io.IOException: No block pool offer service for bpid=BP-677158924-172.17.0.2-1732188596698 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:41,490 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@348e1135 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-677158924-172.17.0.2-1732188596698:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:44593,null,null]) java.io.IOException: No block pool offer service for bpid=BP-677158924-172.17.0.2-1732188596698 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T11:30:41,490 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data3/current/BP-677158924-172.17.0.2-1732188596698 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:30:41,490 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@348e1135 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-677158924-172.17.0.2-1732188596698:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:33697,null,null], DatanodeInfoWithStorage[127.0.0.1:44593,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-677158924-172.17.0.2-1732188596698:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:33697,null,null], DatanodeInfoWithStorage[127.0.0.1:44593,null,null]] 2024-11-21T11:30:41,490 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data4/current/BP-677158924-172.17.0.2-1732188596698 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:30:41,491 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T11:30:41,493 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5ea37f0d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:30:41,493 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@15010086{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:30:41,493 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:30:41,493 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78e445ac{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:30:41,493 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68004957{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/hadoop.log.dir/,STOPPED} 2024-11-21T11:30:41,494 WARN [BP-677158924-172.17.0.2-1732188596698 heartbeating to localhost/127.0.0.1:37309 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T11:30:41,494 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T11:30:41,495 WARN [BP-677158924-172.17.0.2-1732188596698 heartbeating to localhost/127.0.0.1:37309 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-677158924-172.17.0.2-1732188596698 (Datanode Uuid dace58ef-a230-4009-9eb1-a0895ea172a9) service to localhost/127.0.0.1:37309 2024-11-21T11:30:41,495 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T11:30:41,495 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T11:30:41,496 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data9/current/BP-677158924-172.17.0.2-1732188596698 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:30:41,496 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/cluster_904a987f-99de-5a48-6261-717b6374433a/data/data10/current/BP-677158924-172.17.0.2-1732188596698 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:30:41,501 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7cd2a640{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T11:30:41,501 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@64b7b556{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:30:41,501 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:30:41,502 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ed3a961{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:30:41,502 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a488aac{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/hadoop.log.dir/,STOPPED} 2024-11-21T11:30:41,509 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-21T11:30:41,537 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-21T11:30:41,545 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=154 (was 79) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37309 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35785 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37309 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:37309 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37309 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for 
localhost/127.0.0.1:37309 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007eff70bf4ff8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:37309 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:35785 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:37309 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:37309 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:37309 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007eff70bf4ff8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37309 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:37309 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=452 (was 404) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=226 (was 200) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5164 (was 5101) - AvailableMemoryMB LEAK? - 2024-11-21T11:30:41,552 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=452, MaxFileDescriptor=1048576, SystemLoadAverage=226, ProcessCount=11, AvailableMemoryMB=5165 2024-11-21T11:30:41,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T11:30:41,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/hadoop.log.dir so I do NOT create it in target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2 2024-11-21T11:30:41,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e243797-0e63-958f-14d9-eece334c3056/hadoop.tmp.dir so I do NOT create it in target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2 2024-11-21T11:30:41,553 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/cluster_3bebd73c-2d26-d6bf-98f5-4cd57b784b47, deleteOnExit=true 2024-11-21T11:30:41,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T11:30:41,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/test.cache.data in system properties and HBase conf 2024-11-21T11:30:41,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T11:30:41,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/hadoop.log.dir in system 
properties and HBase conf 2024-11-21T11:30:41,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T11:30:41,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-21T11:30:41,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T11:30:41,553 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-21T11:30:41,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T11:30:41,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T11:30:41,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T11:30:41,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T11:30:41,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T11:30:41,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T11:30:41,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T11:30:41,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T11:30:41,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T11:30:41,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/nfs.dump.dir in system properties and HBase conf 2024-11-21T11:30:41,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/java.io.tmpdir in system properties and HBase conf 2024-11-21T11:30:41,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T11:30:41,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T11:30:41,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T11:30:41,567 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-21T11:30:41,567 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T11:30:41,578 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:41,578 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:41,578 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:41,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:41,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:41,580 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:41,585 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:41,585 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:41,586 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:41,589 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:41,666 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:30:41,672 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T11:30:41,673 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T11:30:41,673 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T11:30:41,673 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T11:30:41,674 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:30:41,674 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10e56c5a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/hadoop.log.dir/,AVAILABLE} 2024-11-21T11:30:41,674 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c23e5e9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T11:30:41,788 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@228e200c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/java.io.tmpdir/jetty-localhost-46417-hadoop-hdfs-3_4_1-tests_jar-_-any-16375597963653685263/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T11:30:41,789 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3045a903{HTTP/1.1, (http/1.1)}{localhost:46417} 2024-11-21T11:30:41,789 INFO [Time-limited test {}] server.Server(415): Started @148981ms 2024-11-21T11:30:41,802 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-21T11:30:41,875 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:30:41,878 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T11:30:41,879 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T11:30:41,879 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T11:30:41,879 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T11:30:41,880 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@444db7a8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/hadoop.log.dir/,AVAILABLE} 2024-11-21T11:30:41,880 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77037455{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T11:30:41,996 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@66c0323e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/java.io.tmpdir/jetty-localhost-37911-hadoop-hdfs-3_4_1-tests_jar-_-any-6554732714132912581/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:30:41,997 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2d2191b9{HTTP/1.1, (http/1.1)}{localhost:37911} 2024-11-21T11:30:41,997 INFO [Time-limited test {}] server.Server(415): Started @149189ms 2024-11-21T11:30:41,998 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T11:30:42,031 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:42,039 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:30:42,041 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:30:42,042 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T11:30:42,043 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T11:30:42,043 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T11:30:42,043 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T11:30:42,044 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23fcbb95{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/hadoop.log.dir/,AVAILABLE} 2024-11-21T11:30:42,044 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6699fa8b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T11:30:42,104 WARN [Thread-1188 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/cluster_3bebd73c-2d26-d6bf-98f5-4cd57b784b47/data/data2/current/BP-562595054-172.17.0.2-1732188641609/current, will proceed with Du for space computation calculation, 2024-11-21T11:30:42,104 WARN [Thread-1187 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/cluster_3bebd73c-2d26-d6bf-98f5-4cd57b784b47/data/data1/current/BP-562595054-172.17.0.2-1732188641609/current, will proceed with Du for space computation calculation, 2024-11-21T11:30:42,153 WARN [Thread-1166 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-21T11:30:42,158 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4c09505d5608c4ad with lease ID 0xb9e18bb7b40bce: Processing first storage report for DS-7cd3b54e-0f43-477c-87e8-8f2682c18a81 from datanode DatanodeRegistration(127.0.0.1:44069, datanodeUuid=a6556c12-2947-4038-9f7e-81235e5b9005, infoPort=40045, infoSecurePort=0, ipcPort=42063, storageInfo=lv=-57;cid=testClusterID;nsid=1406459020;c=1732188641609) 2024-11-21T11:30:42,158 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4c09505d5608c4ad with lease ID 0xb9e18bb7b40bce: from storage DS-7cd3b54e-0f43-477c-87e8-8f2682c18a81 node DatanodeRegistration(127.0.0.1:44069, datanodeUuid=a6556c12-2947-4038-9f7e-81235e5b9005, infoPort=40045, infoSecurePort=0, ipcPort=42063, storageInfo=lv=-57;cid=testClusterID;nsid=1406459020;c=1732188641609), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:30:42,158 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4c09505d5608c4ad with lease ID 0xb9e18bb7b40bce: Processing first storage report for DS-de712c2f-75b0-44c2-8e3c-7858a8c76bfe from datanode DatanodeRegistration(127.0.0.1:44069, datanodeUuid=a6556c12-2947-4038-9f7e-81235e5b9005, infoPort=40045, infoSecurePort=0, ipcPort=42063, storageInfo=lv=-57;cid=testClusterID;nsid=1406459020;c=1732188641609) 2024-11-21T11:30:42,158 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4c09505d5608c4ad with lease ID 0xb9e18bb7b40bce: from storage DS-de712c2f-75b0-44c2-8e3c-7858a8c76bfe node DatanodeRegistration(127.0.0.1:44069, datanodeUuid=a6556c12-2947-4038-9f7e-81235e5b9005, infoPort=40045, infoSecurePort=0, ipcPort=42063, storageInfo=lv=-57;cid=testClusterID;nsid=1406459020;c=1732188641609), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:30:42,209 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@33392a77{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/java.io.tmpdir/jetty-localhost-34349-hadoop-hdfs-3_4_1-tests_jar-_-any-4805060010423344357/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:30:42,210 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@57204301{HTTP/1.1, (http/1.1)}{localhost:34349} 2024-11-21T11:30:42,210 INFO [Time-limited test {}] server.Server(415): Started @149402ms 2024-11-21T11:30:42,211 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
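
The repeated "Failed invocation" stack traces above come from the lease-recovery helper calling the filesystem's isFileClosed method reflectively; because the test's DFS client is already closed, the reflective call surfaces the real IOException ("Filesystem closed") wrapped in an InvocationTargetException, which is exactly the shape of the trace logged. A minimal sketch of that reflective call-and-unwrap pattern follows; the class and method names here are illustrative (not the HBase source), and only generic java.lang.reflect plus hadoop-common types are assumed.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {

  // Calls fs.isFileClosed(path) reflectively, the way the lease-recovery helper
  // probes for the method on whatever FileSystem implementation it was handed.
  // Returns false when the method is missing or the underlying call fails.
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return false; // this FileSystem has no isFileClosed at all
    } catch (InvocationTargetException e) {
      // The real failure ("Filesystem closed" above) is the cause, not the wrapper.
      System.err.println("Failed invocation for " + path + ": " + e.getCause());
      return false;
    } catch (IllegalAccessException e) {
      return false;
    }
  }
}

The key point the log illustrates is that InvocationTargetException itself carries no information; the warning is only meaningful once the cause chain ("Caused by: java.io.IOException: Filesystem closed") is read.
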
2024-11-21T11:30:42,311 WARN [Thread-1213 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/cluster_3bebd73c-2d26-d6bf-98f5-4cd57b784b47/data/data3/current/BP-562595054-172.17.0.2-1732188641609/current, will proceed with Du for space computation calculation, 2024-11-21T11:30:42,311 WARN [Thread-1214 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/cluster_3bebd73c-2d26-d6bf-98f5-4cd57b784b47/data/data4/current/BP-562595054-172.17.0.2-1732188641609/current, will proceed with Du for space computation calculation, 2024-11-21T11:30:42,327 WARN [Thread-1202 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T11:30:42,330 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1ecbb4ce1f2646c9 with lease ID 0xb9e18bb7b40bcf: Processing first storage report for DS-af6105f5-1587-4c68-9201-41f9c784ce0a from datanode DatanodeRegistration(127.0.0.1:37337, datanodeUuid=65542fea-e8e6-46b8-9728-fb6a63585a86, infoPort=43875, infoSecurePort=0, ipcPort=34437, storageInfo=lv=-57;cid=testClusterID;nsid=1406459020;c=1732188641609) 2024-11-21T11:30:42,330 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1ecbb4ce1f2646c9 with lease ID 0xb9e18bb7b40bcf: from storage DS-af6105f5-1587-4c68-9201-41f9c784ce0a node DatanodeRegistration(127.0.0.1:37337, datanodeUuid=65542fea-e8e6-46b8-9728-fb6a63585a86, infoPort=43875, infoSecurePort=0, ipcPort=34437, storageInfo=lv=-57;cid=testClusterID;nsid=1406459020;c=1732188641609), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:30:42,330 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1ecbb4ce1f2646c9 with lease ID 0xb9e18bb7b40bcf: Processing first storage report for DS-61b1f713-d650-4f27-a8cf-eeab0a57e811 from datanode DatanodeRegistration(127.0.0.1:37337, datanodeUuid=65542fea-e8e6-46b8-9728-fb6a63585a86, infoPort=43875, infoSecurePort=0, ipcPort=34437, storageInfo=lv=-57;cid=testClusterID;nsid=1406459020;c=1732188641609) 2024-11-21T11:30:42,330 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1ecbb4ce1f2646c9 with lease ID 0xb9e18bb7b40bcf: from storage DS-61b1f713-d650-4f27-a8cf-eeab0a57e811 node DatanodeRegistration(127.0.0.1:37337, datanodeUuid=65542fea-e8e6-46b8-9728-fb6a63585a86, infoPort=43875, infoSecurePort=0, ipcPort=34437, storageInfo=lv=-57;cid=testClusterID;nsid=1406459020;c=1732188641609), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:30:42,336 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2 2024-11-21T11:30:42,339 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/cluster_3bebd73c-2d26-d6bf-98f5-4cd57b784b47/zookeeper_0, clientPort=62913, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/cluster_3bebd73c-2d26-d6bf-98f5-4cd57b784b47/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/cluster_3bebd73c-2d26-d6bf-98f5-4cd57b784b47/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-21T11:30:42,340 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62913 2024-11-21T11:30:42,340 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:30:42,342 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:30:42,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44069 is added to blk_1073741825_1001 (size=7) 2024-11-21T11:30:42,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37337 is added to blk_1073741825_1001 (size=7) 2024-11-21T11:30:42,351 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b with version=8 2024-11-21T11:30:42,351 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/hbase-staging 2024-11-21T11:30:42,353 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7b462513bfc2:0 server-side Connection retries=45 2024-11-21T11:30:42,353 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T11:30:42,353 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T11:30:42,353 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T11:30:42,353 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T11:30:42,354 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T11:30:42,354 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T11:30:42,354 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T11:30:42,354 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46423 2024-11-21T11:30:42,356 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46423 connecting to ZooKeeper ensemble=127.0.0.1:62913 2024-11-21T11:30:42,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:464230x0, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T11:30:42,362 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46423-0x1013a4b06b20000 connected 2024-11-21T11:30:42,375 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:30:42,376 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:30:42,378 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:30:42,378 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b, hbase.cluster.distributed=false 2024-11-21T11:30:42,379 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-21T11:30:42,380 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46423 2024-11-21T11:30:42,380 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46423 2024-11-21T11:30:42,380 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46423 2024-11-21T11:30:42,380 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46423 2024-11-21T11:30:42,381 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46423 2024-11-21T11:30:42,395 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7b462513bfc2:0 server-side Connection retries=45 2024-11-21T11:30:42,396 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T11:30:42,396 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T11:30:42,396 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T11:30:42,396 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T11:30:42,396 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T11:30:42,396 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T11:30:42,396 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T11:30:42,397 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35257 2024-11-21T11:30:42,398 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35257 connecting to ZooKeeper ensemble=127.0.0.1:62913 2024-11-21T11:30:42,398 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:30:42,400 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:30:42,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:352570x0, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T11:30:42,405 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:352570x0, quorum=127.0.0.1:62913, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:30:42,405 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35257-0x1013a4b06b20001 connected 2024-11-21T11:30:42,405 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T11:30:42,406 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T11:30:42,407 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35257-0x1013a4b06b20001, quorum=127.0.0.1:62913, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-21T11:30:42,408 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35257-0x1013a4b06b20001, quorum=127.0.0.1:62913, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-21T11:30:42,408 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35257 2024-11-21T11:30:42,409 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35257 2024-11-21T11:30:42,409 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35257 2024-11-21T11:30:42,409 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35257 2024-11-21T11:30:42,409 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35257 2024-11-21T11:30:42,421 
DEBUG [M:0;7b462513bfc2:46423 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7b462513bfc2:46423 2024-11-21T11:30:42,421 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7b462513bfc2,46423,1732188642353 2024-11-21T11:30:42,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35257-0x1013a4b06b20001, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T11:30:42,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T11:30:42,423 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7b462513bfc2,46423,1732188642353 2024-11-21T11:30:42,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:30:42,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35257-0x1013a4b06b20001, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-21T11:30:42,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35257-0x1013a4b06b20001, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:30:42,425 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-21T11:30:42,425 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7b462513bfc2,46423,1732188642353 from backup master directory 2024-11-21T11:30:42,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7b462513bfc2,46423,1732188642353 2024-11-21T11:30:42,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35257-0x1013a4b06b20001, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T11:30:42,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T11:30:42,426 WARN [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
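
The lines above show the test utility standing up a single-node MiniZooKeeperCluster on an ephemeral client port (62913 here) and the master and regionserver processes opening ZooKeeper sessions against it to register znodes such as /hbase/backup-masters and /hbase/master. A minimal sketch of starting that embedded ZooKeeper in a test JVM, assuming the HBase test artifacts are on the classpath and that MiniZooKeeperCluster.startup(File) still returns the chosen client port as in earlier HBase releases:

import java.io.File;
import java.nio.file.Files;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;

public final class MiniZkExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Scratch directory for the embedded ZooKeeper's data and log dirs.
    File zkDir = Files.createTempDirectory("zookeeper_0").toFile();

    MiniZooKeeperCluster zkCluster = new MiniZooKeeperCluster(conf);
    int clientPort = zkCluster.startup(zkDir); // picks a free port, e.g. 62913 above

    // Point HBase clients/servers in this JVM at the embedded quorum.
    conf.set(HConstants.ZOOKEEPER_QUORUM, "127.0.0.1");
    conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, clientPort);

    try {
      System.out.println("MiniZooKeeperCluster up on client port " + clientPort);
    } finally {
      zkCluster.shutdown();
    }
  }
}

In the run logged here the same wiring is done internally by the testing utility, which is why both the master (port 46423) and the regionserver (port 35257) connect to the quorum at 127.0.0.1:62913.
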
2024-11-21T11:30:42,426 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7b462513bfc2,46423,1732188642353 2024-11-21T11:30:42,431 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/hbase.id] with ID: 19768c38-2434-43ce-a7ce-6cd585b11e7c 2024-11-21T11:30:42,431 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/.tmp/hbase.id 2024-11-21T11:30:42,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44069 is added to blk_1073741826_1002 (size=42) 2024-11-21T11:30:42,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37337 is added to blk_1073741826_1002 (size=42) 2024-11-21T11:30:42,437 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/.tmp/hbase.id]:[hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/hbase.id] 2024-11-21T11:30:42,448 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:30:42,448 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T11:30:42,449 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
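
The two FSUtils lines above show the cluster ID being written to a temporary ".tmp/hbase.id" path first and only then moved to its final location, so a crash mid-write never leaves a half-written ID file visible at the published name. A minimal sketch of that write-then-rename publish pattern against a Hadoop FileSystem; the class and helper names are illustrative, not the HBase utility itself:

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class PublishSmallFile {

  // Writes the content to a temporary sibling path first, then renames it to the
  // target, so the target name only ever appears once the bytes are complete.
  static void writeThenRename(FileSystem fs, Path target, String content) throws IOException {
    Path tmp = new Path(target.getParent(), "." + target.getName() + ".tmp");
    try (FSDataOutputStream out = fs.create(tmp, true /* overwrite */)) {
      out.write(content.getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, target)) {
      throw new IOException("Failed to rename " + tmp + " to " + target);
    }
  }
}

On HDFS the rename is a metadata-only operation, which is why the log shows the move completing within the same millisecond as the block reports for the tiny (42-byte) file.
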
2024-11-21T11:30:42,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35257-0x1013a4b06b20001, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:30:42,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:30:42,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37337 is added to blk_1073741827_1003 (size=196) 2024-11-21T11:30:42,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44069 is added to blk_1073741827_1003 (size=196) 2024-11-21T11:30:42,462 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T11:30:42,463 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T11:30:42,463 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-21T11:30:42,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37337 is added to blk_1073741828_1004 (size=1189) 2024-11-21T11:30:42,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44069 is added to blk_1073741828_1004 (size=1189) 2024-11-21T11:30:42,471 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/data/master/store 2024-11-21T11:30:42,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37337 is added to blk_1073741829_1005 (size=34) 2024-11-21T11:30:42,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44069 is added to blk_1073741829_1005 (size=34) 2024-11-21T11:30:42,477 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:30:42,477 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T11:30:42,477 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:30:42,477 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:30:42,478 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T11:30:42,478 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:30:42,478 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
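
The master:store descriptor dumped above (families info, proc, rs and state, each with its own versions, bloom filter, block size and encoding) is the same kind of schema the public client builder API expresses. A rough sketch of building one such family with ColumnFamilyDescriptorBuilder, assuming the HBase 2/3 client API and using an illustrative table name rather than the internal master:store one:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class StoreSchemaSketch {

  public static TableDescriptor build() {
    // Mirrors the 'info' family from the log: 3 versions, in-memory,
    // ROW_INDEX_V1 encoding, ROWCOL bloom filter, 8 KB blocks.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setBlocksize(8 * 1024)
        .build();

    // The proc/rs/state families in the log keep the defaults:
    // 1 version, ROW bloom filter, 64 KB blocks.
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder.of("proc");

    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example", "store"))
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();
  }
}

The descriptor is only schema metadata; in the run above the region created from it (encoded name 1595e783b53d99cd5eef43b6debb2682) is then instantiated, closed once to write its close journal, and reopened against the newly created WAL.
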
2024-11-21T11:30:42,478 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732188642477Disabling compacts and flushes for region at 1732188642477Disabling writes for close at 1732188642478 (+1 ms)Writing region close event to WAL at 1732188642478Closed at 1732188642478 2024-11-21T11:30:42,479 WARN [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/data/master/store/.initializing 2024-11-21T11:30:42,479 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/WALs/7b462513bfc2,46423,1732188642353 2024-11-21T11:30:42,481 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b462513bfc2%2C46423%2C1732188642353, suffix=, logDir=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/WALs/7b462513bfc2,46423,1732188642353, archiveDir=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/oldWALs, maxLogs=10 2024-11-21T11:30:42,482 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C46423%2C1732188642353.1732188642481 2024-11-21T11:30:42,486 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/WALs/7b462513bfc2,46423,1732188642353/7b462513bfc2%2C46423%2C1732188642353.1732188642481 2024-11-21T11:30:42,488 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43875:43875),(127.0.0.1/127.0.0.1:40045:40045)] 2024-11-21T11:30:42,488 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T11:30:42,489 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:30:42,489 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:30:42,489 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:30:42,490 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:30:42,491 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T11:30:42,491 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:30:42,492 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:30:42,492 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:30:42,493 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-21T11:30:42,493 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:30:42,493 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:30:42,493 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:30:42,494 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T11:30:42,494 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:30:42,495 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:30:42,495 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:30:42,496 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T11:30:42,496 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:30:42,496 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:30:42,496 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:30:42,497 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:30:42,497 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:30:42,498 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:30:42,498 DEBUG [master/7b462513bfc2:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:30:42,499 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T11:30:42,500 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:30:42,502 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T11:30:42,503 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=776713, jitterRate=-0.01235891878604889}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T11:30:42,504 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732188642489Initializing all the Stores at 1732188642490 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188642490Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188642490Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188642490Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188642490Cleaning up temporary data from old regions at 1732188642498 (+8 ms)Region opened successfully at 1732188642503 (+5 ms) 2024-11-21T11:30:42,504 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T11:30:42,508 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35a2ad43, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7b462513bfc2/172.17.0.2:0 2024-11-21T11:30:42,510 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-21T11:30:42,510 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T11:30:42,510 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T11:30:42,510 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T11:30:42,511 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-21T11:30:42,511 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T11:30:42,511 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T11:30:42,513 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-21T11:30:42,514 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-21T11:30:42,515 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-21T11:30:42,516 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T11:30:42,516 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-21T11:30:42,517 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-21T11:30:42,518 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T11:30:42,519 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-21T11:30:42,520 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-21T11:30:42,521 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-21T11:30:42,524 DEBUG 
[master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-21T11:30:42,526 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T11:30:42,527 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-21T11:30:42,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-21T11:30:42,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35257-0x1013a4b06b20001, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-21T11:30:42,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:30:42,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35257-0x1013a4b06b20001, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:30:42,530 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7b462513bfc2,46423,1732188642353, sessionid=0x1013a4b06b20000, setting cluster-up flag (Was=false) 2024-11-21T11:30:42,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35257-0x1013a4b06b20001, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:30:42,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:30:42,538 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-21T11:30:42,539 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7b462513bfc2,46423,1732188642353 2024-11-21T11:30:42,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:30:42,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35257-0x1013a4b06b20001, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:30:42,553 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-21T11:30:42,554 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7b462513bfc2,46423,1732188642353 2024-11-21T11:30:42,555 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T11:30:42,557 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T11:30:42,558 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T11:30:42,558 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-21T11:30:42,558 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7b462513bfc2,46423,1732188642353 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T11:30:42,559 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:30:42,559 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:30:42,559 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:30:42,559 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:30:42,559 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7b462513bfc2:0, corePoolSize=10, maxPoolSize=10 2024-11-21T11:30:42,559 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:30:42,560 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7b462513bfc2:0, corePoolSize=2, maxPoolSize=2 2024-11-21T11:30:42,560 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7b462513bfc2:0, corePoolSize=1, 
maxPoolSize=1 2024-11-21T11:30:42,561 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T11:30:42,562 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T11:30:42,563 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:30:42,563 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T11:30:42,564 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732188672564 2024-11-21T11:30:42,564 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T11:30:42,564 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T11:30:42,564 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T11:30:42,564 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T11:30:42,564 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T11:30:42,564 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T11:30:42,564 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T11:30:42,567 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T11:30:42,567 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T11:30:42,568 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T11:30:42,568 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T11:30:42,568 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T11:30:42,569 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.large.0-1732188642568,5,FailOnTimeoutGroup] 2024-11-21T11:30:42,569 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.small.0-1732188642569,5,FailOnTimeoutGroup] 2024-11-21T11:30:42,569 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T11:30:42,569 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T11:30:42,569 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T11:30:42,569 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-21T11:30:42,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37337 is added to blk_1073741831_1007 (size=1321) 2024-11-21T11:30:42,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44069 is added to blk_1073741831_1007 (size=1321) 2024-11-21T11:30:42,575 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T11:30:42,575 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b 2024-11-21T11:30:42,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44069 is added to blk_1073741832_1008 (size=32) 2024-11-21T11:30:42,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37337 is added to blk_1073741832_1008 (size=32) 2024-11-21T11:30:42,583 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:30:42,584 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T11:30:42,585 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T11:30:42,585 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:30:42,586 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:30:42,586 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T11:30:42,588 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T11:30:42,588 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:30:42,588 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:30:42,588 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T11:30:42,589 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T11:30:42,589 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:30:42,590 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:30:42,590 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T11:30:42,591 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T11:30:42,591 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:30:42,591 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:30:42,591 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T11:30:42,592 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/hbase/meta/1588230740 2024-11-21T11:30:42,592 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/hbase/meta/1588230740 2024-11-21T11:30:42,594 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T11:30:42,594 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T11:30:42,594 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-21T11:30:42,595 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T11:30:42,598 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T11:30:42,598 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=712741, jitterRate=-0.09370362758636475}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-21T11:30:42,599 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732188642583Initializing all the Stores at 1732188642584 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188642584Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188642584Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188642584Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188642584Cleaning up temporary data from old regions at 1732188642594 (+10 ms)Region opened successfully at 1732188642599 (+5 ms) 2024-11-21T11:30:42,599 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T11:30:42,599 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T11:30:42,599 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T11:30:42,599 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T11:30:42,599 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T11:30:42,599 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T11:30:42,599 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732188642599Disabling compacts and flushes for region at 1732188642599Disabling writes for close at 1732188642599Writing region 
close event to WAL at 1732188642599Closed at 1732188642599 2024-11-21T11:30:42,601 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T11:30:42,601 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T11:30:42,601 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T11:30:42,602 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T11:30:42,603 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T11:30:42,611 INFO [RS:0;7b462513bfc2:35257 {}] regionserver.HRegionServer(746): ClusterId : 19768c38-2434-43ce-a7ce-6cd585b11e7c 2024-11-21T11:30:42,611 DEBUG [RS:0;7b462513bfc2:35257 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T11:30:42,613 DEBUG [RS:0;7b462513bfc2:35257 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T11:30:42,613 DEBUG [RS:0;7b462513bfc2:35257 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T11:30:42,615 DEBUG [RS:0;7b462513bfc2:35257 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T11:30:42,615 DEBUG [RS:0;7b462513bfc2:35257 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31d8883, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7b462513bfc2/172.17.0.2:0 2024-11-21T11:30:42,627 DEBUG [RS:0;7b462513bfc2:35257 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7b462513bfc2:35257 2024-11-21T11:30:42,627 INFO [RS:0;7b462513bfc2:35257 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T11:30:42,627 INFO [RS:0;7b462513bfc2:35257 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T11:30:42,627 DEBUG [RS:0;7b462513bfc2:35257 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-21T11:30:42,627 INFO [RS:0;7b462513bfc2:35257 {}] regionserver.HRegionServer(2659): reportForDuty to master=7b462513bfc2,46423,1732188642353 with port=35257, startcode=1732188642395 2024-11-21T11:30:42,628 DEBUG [RS:0;7b462513bfc2:35257 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T11:30:42,630 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42967, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T11:30:42,630 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46423 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7b462513bfc2,35257,1732188642395 2024-11-21T11:30:42,630 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46423 {}] master.ServerManager(517): Registering regionserver=7b462513bfc2,35257,1732188642395 2024-11-21T11:30:42,632 DEBUG [RS:0;7b462513bfc2:35257 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b 2024-11-21T11:30:42,632 DEBUG [RS:0;7b462513bfc2:35257 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39435 2024-11-21T11:30:42,632 DEBUG [RS:0;7b462513bfc2:35257 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T11:30:42,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-21T11:30:42,634 DEBUG [RS:0;7b462513bfc2:35257 {}] zookeeper.ZKUtil(111): regionserver:35257-0x1013a4b06b20001, quorum=127.0.0.1:62913, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7b462513bfc2,35257,1732188642395 2024-11-21T11:30:42,634 WARN [RS:0;7b462513bfc2:35257 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T11:30:42,634 INFO [RS:0;7b462513bfc2:35257 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-21T11:30:42,634 DEBUG [RS:0;7b462513bfc2:35257 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395 2024-11-21T11:30:42,635 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7b462513bfc2,35257,1732188642395] 2024-11-21T11:30:42,638 INFO [RS:0;7b462513bfc2:35257 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T11:30:42,639 INFO [RS:0;7b462513bfc2:35257 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T11:30:42,640 INFO [RS:0;7b462513bfc2:35257 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T11:30:42,640 INFO [RS:0;7b462513bfc2:35257 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-21T11:30:42,640 INFO [RS:0;7b462513bfc2:35257 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T11:30:42,641 INFO [RS:0;7b462513bfc2:35257 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T11:30:42,641 INFO [RS:0;7b462513bfc2:35257 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-21T11:30:42,641 DEBUG [RS:0;7b462513bfc2:35257 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:30:42,641 DEBUG [RS:0;7b462513bfc2:35257 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:30:42,641 DEBUG [RS:0;7b462513bfc2:35257 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:30:42,641 DEBUG [RS:0;7b462513bfc2:35257 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:30:42,641 DEBUG [RS:0;7b462513bfc2:35257 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:30:42,641 DEBUG [RS:0;7b462513bfc2:35257 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7b462513bfc2:0, corePoolSize=2, maxPoolSize=2 2024-11-21T11:30:42,641 DEBUG [RS:0;7b462513bfc2:35257 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:30:42,641 DEBUG [RS:0;7b462513bfc2:35257 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:30:42,642 DEBUG [RS:0;7b462513bfc2:35257 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:30:42,642 DEBUG [RS:0;7b462513bfc2:35257 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:30:42,642 DEBUG [RS:0;7b462513bfc2:35257 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:30:42,642 DEBUG [RS:0;7b462513bfc2:35257 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:30:42,642 DEBUG [RS:0;7b462513bfc2:35257 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7b462513bfc2:0, corePoolSize=3, maxPoolSize=3 2024-11-21T11:30:42,642 DEBUG [RS:0;7b462513bfc2:35257 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0, corePoolSize=3, maxPoolSize=3 2024-11-21T11:30:42,644 INFO [RS:0;7b462513bfc2:35257 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-21T11:30:42,644 INFO [RS:0;7b462513bfc2:35257 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T11:30:42,644 INFO [RS:0;7b462513bfc2:35257 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T11:30:42,644 INFO [RS:0;7b462513bfc2:35257 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-21T11:30:42,644 INFO [RS:0;7b462513bfc2:35257 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T11:30:42,644 INFO [RS:0;7b462513bfc2:35257 {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,35257,1732188642395-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T11:30:42,659 INFO [RS:0;7b462513bfc2:35257 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T11:30:42,659 INFO [RS:0;7b462513bfc2:35257 {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,35257,1732188642395-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T11:30:42,659 INFO [RS:0;7b462513bfc2:35257 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:30:42,659 INFO [RS:0;7b462513bfc2:35257 {}] regionserver.Replication(171): 7b462513bfc2,35257,1732188642395 started 2024-11-21T11:30:42,672 INFO [RS:0;7b462513bfc2:35257 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:30:42,673 INFO [RS:0;7b462513bfc2:35257 {}] regionserver.HRegionServer(1482): Serving as 7b462513bfc2,35257,1732188642395, RpcServer on 7b462513bfc2/172.17.0.2:35257, sessionid=0x1013a4b06b20001 2024-11-21T11:30:42,673 DEBUG [RS:0;7b462513bfc2:35257 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T11:30:42,673 DEBUG [RS:0;7b462513bfc2:35257 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7b462513bfc2,35257,1732188642395 2024-11-21T11:30:42,673 DEBUG [RS:0;7b462513bfc2:35257 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7b462513bfc2,35257,1732188642395' 2024-11-21T11:30:42,673 DEBUG [RS:0;7b462513bfc2:35257 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-21T11:30:42,673 DEBUG [RS:0;7b462513bfc2:35257 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-21T11:30:42,674 DEBUG [RS:0;7b462513bfc2:35257 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T11:30:42,674 DEBUG [RS:0;7b462513bfc2:35257 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T11:30:42,674 DEBUG [RS:0;7b462513bfc2:35257 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7b462513bfc2,35257,1732188642395 2024-11-21T11:30:42,674 DEBUG [RS:0;7b462513bfc2:35257 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7b462513bfc2,35257,1732188642395' 2024-11-21T11:30:42,674 DEBUG [RS:0;7b462513bfc2:35257 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-21T11:30:42,674 DEBUG 
[RS:0;7b462513bfc2:35257 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-21T11:30:42,674 DEBUG [RS:0;7b462513bfc2:35257 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T11:30:42,674 INFO [RS:0;7b462513bfc2:35257 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T11:30:42,674 INFO [RS:0;7b462513bfc2:35257 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T11:30:42,754 WARN [7b462513bfc2:46423 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-21T11:30:42,777 INFO [RS:0;7b462513bfc2:35257 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b462513bfc2%2C35257%2C1732188642395, suffix=, logDir=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395, archiveDir=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/oldWALs, maxLogs=32 2024-11-21T11:30:42,777 INFO [RS:0;7b462513bfc2:35257 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C35257%2C1732188642395.1732188642777 2024-11-21T11:30:42,783 INFO [RS:0;7b462513bfc2:35257 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188642777 2024-11-21T11:30:42,784 DEBUG [RS:0;7b462513bfc2:35257 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43875:43875),(127.0.0.1/127.0.0.1:40045:40045)] 2024-11-21T11:30:43,004 DEBUG [7b462513bfc2:46423 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T11:30:43,005 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7b462513bfc2,35257,1732188642395 2024-11-21T11:30:43,006 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7b462513bfc2,35257,1732188642395, state=OPENING 2024-11-21T11:30:43,008 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T11:30:43,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:30:43,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35257-0x1013a4b06b20001, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:30:43,010 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T11:30:43,010 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:30:43,010 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:30:43,010 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7b462513bfc2,35257,1732188642395}] 2024-11-21T11:30:43,032 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:43,042 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:43,164 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T11:30:43,166 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32877, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T11:30:43,170 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T11:30:43,170 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-21T11:30:43,171 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b462513bfc2%2C35257%2C1732188642395.meta, suffix=.meta, logDir=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395, archiveDir=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/oldWALs, maxLogs=32 2024-11-21T11:30:43,172 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C35257%2C1732188642395.meta.1732188643172.meta 2024-11-21T11:30:43,177 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.meta.1732188643172.meta 2024-11-21T11:30:43,185 DEBUG 
[RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40045:40045),(127.0.0.1/127.0.0.1:43875:43875)] 2024-11-21T11:30:43,186 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T11:30:43,186 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T11:30:43,186 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T11:30:43,186 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-21T11:30:43,186 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T11:30:43,186 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:30:43,187 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T11:30:43,187 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T11:30:43,188 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T11:30:43,189 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T11:30:43,189 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:30:43,189 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:30:43,189 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T11:30:43,190 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T11:30:43,190 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:30:43,190 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:30:43,191 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T11:30:43,191 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T11:30:43,191 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:30:43,192 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:30:43,192 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T11:30:43,192 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T11:30:43,192 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:30:43,193 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:30:43,193 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T11:30:43,193 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/hbase/meta/1588230740 2024-11-21T11:30:43,194 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/hbase/meta/1588230740 2024-11-21T11:30:43,196 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T11:30:43,196 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T11:30:43,196 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
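Note: the FlushLargeStoresPolicy message just above indicates the policy first looks for hbase.hregion.percolumnfamilyflush.size.lower.bound in the table descriptor and, finding nothing, falls back to memstore-flush-size divided by the number of families (16.0 M here). Purely as an illustrative sketch, and not code from this test, the bound could be pinned explicitly either cluster-wide or per table; the key is quoted verbatim from the log message, while the builder usage and names are assumptions about the standard client API.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    // Cluster-wide setting; the key matches the one named in the log record above.
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);

    // Per-table setting stored in the descriptor (hypothetical example table/family);
    // per the log message, the descriptor is consulted before the fallback is used.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                  String.valueOf(16L * 1024 * 1024))
        .build();
    System.out.println(td);
  }
}
```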
2024-11-21T11:30:43,198 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T11:30:43,198 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=773130, jitterRate=-0.016914382576942444}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-21T11:30:43,199 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T11:30:43,200 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732188643187Writing region info on filesystem at 1732188643187Initializing all the Stores at 1732188643187Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188643187Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188643188 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188643188Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188643188Cleaning up temporary data from old regions at 1732188643196 (+8 ms)Running coprocessor post-open hooks at 1732188643199 (+3 ms)Region opened successfully at 1732188643199 2024-11-21T11:30:43,201 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732188643163 2024-11-21T11:30:43,203 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T11:30:43,203 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T11:30:43,204 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=7b462513bfc2,35257,1732188642395 2024-11-21T11:30:43,204 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7b462513bfc2,35257,1732188642395, state=OPEN 2024-11-21T11:30:43,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35257-0x1013a4b06b20001, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-21T11:30:43,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-21T11:30:43,209 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7b462513bfc2,35257,1732188642395 2024-11-21T11:30:43,209 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:30:43,209 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:30:43,212 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T11:30:43,212 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7b462513bfc2,35257,1732188642395 in 199 msec 2024-11-21T11:30:43,215 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T11:30:43,215 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 611 msec 2024-11-21T11:30:43,216 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T11:30:43,216 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T11:30:43,218 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T11:30:43,218 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7b462513bfc2,35257,1732188642395, seqNum=-1] 2024-11-21T11:30:43,218 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T11:30:43,219 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58175, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T11:30:43,225 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 667 msec 2024-11-21T11:30:43,225 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732188643225, completionTime=-1 2024-11-21T11:30:43,226 INFO 
[master/7b462513bfc2:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T11:30:43,226 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-21T11:30:43,227 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-21T11:30:43,227 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732188703227 2024-11-21T11:30:43,227 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732188763227 2024-11-21T11:30:43,227 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-21T11:30:43,228 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,46423,1732188642353-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T11:30:43,228 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,46423,1732188642353-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:30:43,228 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,46423,1732188642353-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:30:43,228 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7b462513bfc2:46423, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:30:43,228 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T11:30:43,228 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T11:30:43,230 DEBUG [master/7b462513bfc2:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T11:30:43,232 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.806sec 2024-11-21T11:30:43,232 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T11:30:43,232 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T11:30:43,232 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T11:30:43,232 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
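Note: the master startup records above and below enable a series of ScheduledChore instances (ClusterStatusChore, BalancerChore, CatalogJanitor, HbckChore, and so on), each registered with a ChoreService and a fixed period. The following is only a rough sketch of that mechanism; the constructor arguments, the milliseconds unit, and the Stoppable stub are assumptions about the public ScheduledChore/ChoreService API, not code taken from HBase or from this test.

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Minimal Stoppable so the chore has an owner to consult on shutdown.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    // A periodic task, loosely analogous to the chores enabled in the log
    // (period assumed to be milliseconds, matching "unit=MILLISECONDS" above).
    ScheduledChore chore = new ScheduledChore("example-chore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("chore tick");
      }
    };

    ChoreService service = new ChoreService("example");
    service.scheduleChore(chore); // roughly the step that logs "Chore ... is enabled."
    Thread.sleep(3000);
    service.shutdown();
  }
}
```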
2024-11-21T11:30:43,232 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T11:30:43,232 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,46423,1732188642353-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T11:30:43,232 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,46423,1732188642353-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T11:30:43,235 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T11:30:43,235 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T11:30:43,235 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,46423,1732188642353-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:30:43,312 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6806ec7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T11:30:43,312 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7b462513bfc2,46423,-1 for getting cluster id 2024-11-21T11:30:43,312 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T11:30:43,314 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '19768c38-2434-43ce-a7ce-6cd585b11e7c' 2024-11-21T11:30:43,314 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T11:30:43,315 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "19768c38-2434-43ce-a7ce-6cd585b11e7c" 2024-11-21T11:30:43,315 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e8da49e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T11:30:43,315 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7b462513bfc2,46423,-1] 2024-11-21T11:30:43,315 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T11:30:43,316 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:30:43,317 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36682, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T11:30:43,318 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@220d6e9d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T11:30:43,318 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T11:30:43,319 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7b462513bfc2,35257,1732188642395, seqNum=-1] 2024-11-21T11:30:43,320 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T11:30:43,321 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42380, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T11:30:43,323 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7b462513bfc2,46423,1732188642353 2024-11-21T11:30:43,323 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:30:43,326 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-21T11:30:43,326 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-21T11:30:43,326 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-21T11:30:43,326 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T11:30:43,327 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 7b462513bfc2,46423,1732188642353 2024-11-21T11:30:43,327 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5f78b918 2024-11-21T11:30:43,327 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T11:30:43,329 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36686, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T11:30:43,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46423 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-21T11:30:43,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46423 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
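Note: the two TableDescriptorChecker warnings above fire because the create request that follows deliberately uses a very small maximum file size (786432 bytes) and memstore flush size (8192 bytes), so that log rolls and flushes happen quickly during the test. A hedged sketch of how such a descriptor could be built and submitted with the standard client API is shown below; only the table name, family name, and the two size values are taken from the log, while the connection handling is illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateSmallTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .setMaxFileSize(786432L)        // small enough to trigger the MAX_FILESIZE warning
          .setMemStoreFlushSize(8192L)    // small enough to trigger the MEMSTORE_FLUSHSIZE warning
          .build();
      admin.createTable(td);              // the master then stores a CreateTableProcedure (pid=4 above)
    }
  }
}
```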
2024-11-21T11:30:43,330 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46423 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T11:30:43,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46423 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-21T11:30:43,333 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T11:30:43,333 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:30:43,333 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46423 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-21T11:30:43,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46423 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T11:30:43,334 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T11:30:43,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44069 is added to blk_1073741835_1011 (size=395) 2024-11-21T11:30:43,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37337 is added to blk_1073741835_1011 (size=395) 2024-11-21T11:30:43,343 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3ce34dc504a204da49ddededf18633a0, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732188643330.3ce34dc504a204da49ddededf18633a0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b 2024-11-21T11:30:43,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37337 is added to blk_1073741836_1012 (size=78) 2024-11-21T11:30:43,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44069 is added to blk_1073741836_1012 (size=78) 2024-11-21T11:30:43,352 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732188643330.3ce34dc504a204da49ddededf18633a0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:30:43,352 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 3ce34dc504a204da49ddededf18633a0, disabling compactions & flushes 2024-11-21T11:30:43,352 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732188643330.3ce34dc504a204da49ddededf18633a0. 2024-11-21T11:30:43,352 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732188643330.3ce34dc504a204da49ddededf18633a0. 2024-11-21T11:30:43,352 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732188643330.3ce34dc504a204da49ddededf18633a0. after waiting 0 ms 2024-11-21T11:30:43,352 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732188643330.3ce34dc504a204da49ddededf18633a0. 2024-11-21T11:30:43,352 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732188643330.3ce34dc504a204da49ddededf18633a0. 2024-11-21T11:30:43,352 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3ce34dc504a204da49ddededf18633a0: Waiting for close lock at 1732188643352Disabling compacts and flushes for region at 1732188643352Disabling writes for close at 1732188643352Writing region close event to WAL at 1732188643352Closed at 1732188643352 2024-11-21T11:30:43,354 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T11:30:43,354 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1732188643330.3ce34dc504a204da49ddededf18633a0.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1732188643354"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732188643354"}]},"ts":"1732188643354"} 2024-11-21T11:30:43,357 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-21T11:30:43,358 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T11:30:43,359 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732188643358"}]},"ts":"1732188643358"} 2024-11-21T11:30:43,361 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-21T11:30:43,361 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=3ce34dc504a204da49ddededf18633a0, ASSIGN}] 2024-11-21T11:30:43,363 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=3ce34dc504a204da49ddededf18633a0, ASSIGN 2024-11-21T11:30:43,364 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=3ce34dc504a204da49ddededf18633a0, ASSIGN; state=OFFLINE, location=7b462513bfc2,35257,1732188642395; forceNewPlan=false, retain=false 2024-11-21T11:30:43,515 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3ce34dc504a204da49ddededf18633a0, regionState=OPENING, regionLocation=7b462513bfc2,35257,1732188642395 2024-11-21T11:30:43,517 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=3ce34dc504a204da49ddededf18633a0, ASSIGN because future has completed 2024-11-21T11:30:43,518 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3ce34dc504a204da49ddededf18633a0, server=7b462513bfc2,35257,1732188642395}] 2024-11-21T11:30:43,675 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1732188643330.3ce34dc504a204da49ddededf18633a0. 
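Note: at this point the assignment has picked 7b462513bfc2,35257 and dispatched an OpenRegionProcedure; once the open completes in the records that follow, the location is written back to hbase:meta with regionState=OPEN. As a small illustrative check, not part of this test, a client could confirm the assignment through RegionLocator; the method names are from the standard client API and the table name comes from the log, but everything else is assumed.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(tn)) {
      // reload=true bypasses the client-side cache, so the answer reflects the
      // row published to hbase:meta by the assignment shown in the surrounding log.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""), true);
      System.out.println(loc.getRegion().getEncodedName() + " on " + loc.getServerName());
    }
  }
}
```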
2024-11-21T11:30:43,675 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 3ce34dc504a204da49ddededf18633a0, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732188643330.3ce34dc504a204da49ddededf18633a0.', STARTKEY => '', ENDKEY => ''} 2024-11-21T11:30:43,676 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 3ce34dc504a204da49ddededf18633a0 2024-11-21T11:30:43,676 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732188643330.3ce34dc504a204da49ddededf18633a0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:30:43,676 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 3ce34dc504a204da49ddededf18633a0 2024-11-21T11:30:43,676 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 3ce34dc504a204da49ddededf18633a0 2024-11-21T11:30:43,677 INFO [StoreOpener-3ce34dc504a204da49ddededf18633a0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 3ce34dc504a204da49ddededf18633a0 2024-11-21T11:30:43,678 INFO [StoreOpener-3ce34dc504a204da49ddededf18633a0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3ce34dc504a204da49ddededf18633a0 columnFamilyName info 2024-11-21T11:30:43,678 DEBUG [StoreOpener-3ce34dc504a204da49ddededf18633a0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:30:43,679 INFO [StoreOpener-3ce34dc504a204da49ddededf18633a0-1 {}] regionserver.HStore(327): Store=3ce34dc504a204da49ddededf18633a0/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:30:43,679 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 3ce34dc504a204da49ddededf18633a0 2024-11-21T11:30:43,680 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/default/TestLogRolling-testLogRollOnPipelineRestart/3ce34dc504a204da49ddededf18633a0 2024-11-21T11:30:43,680 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/default/TestLogRolling-testLogRollOnPipelineRestart/3ce34dc504a204da49ddededf18633a0 2024-11-21T11:30:43,681 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 3ce34dc504a204da49ddededf18633a0 2024-11-21T11:30:43,681 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 3ce34dc504a204da49ddededf18633a0 2024-11-21T11:30:43,683 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 3ce34dc504a204da49ddededf18633a0 2024-11-21T11:30:43,685 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/default/TestLogRolling-testLogRollOnPipelineRestart/3ce34dc504a204da49ddededf18633a0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T11:30:43,685 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 3ce34dc504a204da49ddededf18633a0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=854624, jitterRate=0.08671076595783234}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T11:30:43,685 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3ce34dc504a204da49ddededf18633a0 2024-11-21T11:30:43,686 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 3ce34dc504a204da49ddededf18633a0: Running coprocessor pre-open hook at 1732188643676Writing region info on filesystem at 1732188643676Initializing all the Stores at 1732188643677 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188643677Cleaning up temporary data from old regions at 1732188643681 (+4 ms)Running coprocessor post-open hooks at 1732188643685 (+4 ms)Region opened successfully at 1732188643686 (+1 ms) 2024-11-21T11:30:43,688 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1732188643330.3ce34dc504a204da49ddededf18633a0., pid=6, masterSystemTime=1732188643671 2024-11-21T11:30:43,690 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1732188643330.3ce34dc504a204da49ddededf18633a0. 2024-11-21T11:30:43,690 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1732188643330.3ce34dc504a204da49ddededf18633a0. 2024-11-21T11:30:43,691 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3ce34dc504a204da49ddededf18633a0, regionState=OPEN, openSeqNum=2, regionLocation=7b462513bfc2,35257,1732188642395 2024-11-21T11:30:43,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3ce34dc504a204da49ddededf18633a0, server=7b462513bfc2,35257,1732188642395 because future has completed 2024-11-21T11:30:43,698 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T11:30:43,698 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 3ce34dc504a204da49ddededf18633a0, server=7b462513bfc2,35257,1732188642395 in 177 msec 2024-11-21T11:30:43,701 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-21T11:30:43,701 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=3ce34dc504a204da49ddededf18633a0, ASSIGN in 337 msec 2024-11-21T11:30:43,702 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T11:30:43,702 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732188643702"}]},"ts":"1732188643702"} 2024-11-21T11:30:43,704 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-21T11:30:43,705 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T11:30:43,707 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 375 msec 2024-11-21T11:30:44,032 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:44,042 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:45,033 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:45,043 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:46,034 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:46,043 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:47,034 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:47,044 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:48,035 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:48,045 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:48,688 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T11:30:48,702 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:48,703 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:48,703 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:48,703 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:48,704 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:48,704 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:48,707 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:48,707 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:48,707 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:48,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:30:48,714 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-21T11:30:48,714 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-21T11:30:48,715 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-21T11:30:48,715 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-21T11:30:48,715 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T11:30:48,715 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-21T11:30:48,715 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-21T11:30:48,716 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-21T11:30:49,036 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:49,045 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:30:50,036 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:50,046 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:51,037 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:30:51,046 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:52,038 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:52,047 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:30:53,038 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:53,047 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:53,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46423 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T11:30:53,369 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-21T11:30:53,369 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-21T11:30:53,372 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-21T11:30:53,372 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1732188643330.3ce34dc504a204da49ddededf18633a0. 2024-11-21T11:30:53,375 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1732188643330.3ce34dc504a204da49ddededf18633a0., hostname=7b462513bfc2,35257,1732188642395, seqNum=2] 2024-11-21T11:30:54,039 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:54,048 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:55,039 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:55,048 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:55,378 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188642777 2024-11-21T11:30:55,379 WARN [ResponseProcessor for block BP-562595054-172.17.0.2-1732188641609:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-562595054-172.17.0.2-1732188641609:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:55,379 WARN [ResponseProcessor for block BP-562595054-172.17.0.2-1732188641609:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-562595054-172.17.0.2-1732188641609:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-21T11:30:55,379 WARN [ResponseProcessor for block BP-562595054-172.17.0.2-1732188641609:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-562595054-172.17.0.2-1732188641609:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-562595054-172.17.0.2-1732188641609:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:37337,DS-af6105f5-1587-4c68-9201-41f9c784ce0a,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:55,379 WARN [DataStreamer for file /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/WALs/7b462513bfc2,46423,1732188642353/7b462513bfc2%2C46423%2C1732188642353.1732188642481 block BP-562595054-172.17.0.2-1732188641609:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-562595054-172.17.0.2-1732188641609:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37337,DS-af6105f5-1587-4c68-9201-41f9c784ce0a,DISK], DatanodeInfoWithStorage[127.0.0.1:44069,DS-7cd3b54e-0f43-477c-87e8-8f2682c18a81,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37337,DS-af6105f5-1587-4c68-9201-41f9c784ce0a,DISK]) is bad. 2024-11-21T11:30:55,379 WARN [DataStreamer for file /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.meta.1732188643172.meta block BP-562595054-172.17.0.2-1732188641609:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-562595054-172.17.0.2-1732188641609:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44069,DS-7cd3b54e-0f43-477c-87e8-8f2682c18a81,DISK], DatanodeInfoWithStorage[127.0.0.1:37337,DS-af6105f5-1587-4c68-9201-41f9c784ce0a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37337,DS-af6105f5-1587-4c68-9201-41f9c784ce0a,DISK]) is bad. 2024-11-21T11:30:55,379 WARN [PacketResponder: BP-562595054-172.17.0.2-1732188641609:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:37337] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:55,380 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1901550705_22 at /127.0.0.1:52654 [Receiving block BP-562595054-172.17.0.2-1732188641609:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37337:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52654 dst: /127.0.0.1:37337 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:55,380 WARN [DataStreamer for file /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188642777 block BP-562595054-172.17.0.2-1732188641609:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-562595054-172.17.0.2-1732188641609:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37337,DS-af6105f5-1587-4c68-9201-41f9c784ce0a,DISK], DatanodeInfoWithStorage[127.0.0.1:44069,DS-7cd3b54e-0f43-477c-87e8-8f2682c18a81,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37337,DS-af6105f5-1587-4c68-9201-41f9c784ce0a,DISK]) is bad. 
2024-11-21T11:30:55,380 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1901550705_22 at /127.0.0.1:50126 [Receiving block BP-562595054-172.17.0.2-1732188641609:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44069:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50126 dst: /127.0.0.1:44069 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:55,380 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1455043275_22 at /127.0.0.1:50150 [Receiving block BP-562595054-172.17.0.2-1732188641609:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44069:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50150 dst: /127.0.0.1:44069 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T11:30:55,380 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1455043275_22 at /127.0.0.1:52696 [Receiving block BP-562595054-172.17.0.2-1732188641609:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37337:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52696 dst: /127.0.0.1:37337 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:55,380 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1455043275_22 at /127.0.0.1:52706 [Receiving block BP-562595054-172.17.0.2-1732188641609:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37337:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52706 dst: /127.0.0.1:37337 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:55,380 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1455043275_22 at /127.0.0.1:50146 [Receiving block BP-562595054-172.17.0.2-1732188641609:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44069:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50146 dst: /127.0.0.1:44069 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T11:30:55,382 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@33392a77{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:30:55,382 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@57204301{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:30:55,382 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:30:55,382 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6699fa8b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:30:55,383 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23fcbb95{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/hadoop.log.dir/,STOPPED} 2024-11-21T11:30:55,384 WARN [BP-562595054-172.17.0.2-1732188641609 heartbeating to localhost/127.0.0.1:39435 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T11:30:55,384 WARN [BP-562595054-172.17.0.2-1732188641609 heartbeating to localhost/127.0.0.1:39435 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-562595054-172.17.0.2-1732188641609 (Datanode Uuid 65542fea-e8e6-46b8-9728-fb6a63585a86) service to localhost/127.0.0.1:39435 2024-11-21T11:30:55,384 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-21T11:30:55,384 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T11:30:55,385 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/cluster_3bebd73c-2d26-d6bf-98f5-4cd57b784b47/data/data3/current/BP-562595054-172.17.0.2-1732188641609 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:30:55,385 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/cluster_3bebd73c-2d26-d6bf-98f5-4cd57b784b47/data/data4/current/BP-562595054-172.17.0.2-1732188641609 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:30:55,385 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T11:30:55,394 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:30:55,397 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T11:30:55,397 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T11:30:55,397 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T11:30:55,397 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T11:30:55,398 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70644e5e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/hadoop.log.dir/,AVAILABLE} 2024-11-21T11:30:55,398 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@f09ac85{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T11:30:55,511 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@431e378c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/java.io.tmpdir/jetty-localhost-40081-hadoop-hdfs-3_4_1-tests_jar-_-any-977168283128480652/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:30:55,512 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1ace5703{HTTP/1.1, (http/1.1)}{localhost:40081} 2024-11-21T11:30:55,512 INFO [Time-limited test {}] server.Server(415): Started @162704ms 2024-11-21T11:30:55,513 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T11:30:55,532 WARN [ResponseProcessor for block BP-562595054-172.17.0.2-1732188641609:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-562595054-172.17.0.2-1732188641609:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:55,532 WARN [ResponseProcessor for block BP-562595054-172.17.0.2-1732188641609:blk_1073741834_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-562595054-172.17.0.2-1732188641609:blk_1073741834_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:55,532 WARN [ResponseProcessor for block BP-562595054-172.17.0.2-1732188641609:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-562595054-172.17.0.2-1732188641609:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:55,532 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1455043275_22 at /127.0.0.1:43154 [Receiving block BP-562595054-172.17.0.2-1732188641609:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44069:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43154 dst: /127.0.0.1:44069 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T11:30:55,533 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1455043275_22 at /127.0.0.1:43158 [Receiving block BP-562595054-172.17.0.2-1732188641609:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44069:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43158 dst: /127.0.0.1:44069 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:55,533 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1901550705_22 at /127.0.0.1:43162 [Receiving block BP-562595054-172.17.0.2-1732188641609:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44069:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43162 dst: /127.0.0.1:44069 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:30:55,537 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@66c0323e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:30:55,537 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2d2191b9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:30:55,537 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:30:55,537 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77037455{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:30:55,537 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@444db7a8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/hadoop.log.dir/,STOPPED} 2024-11-21T11:30:55,538 WARN [BP-562595054-172.17.0.2-1732188641609 heartbeating to localhost/127.0.0.1:39435 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T11:30:55,538 WARN [BP-562595054-172.17.0.2-1732188641609 heartbeating to localhost/127.0.0.1:39435 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-562595054-172.17.0.2-1732188641609 (Datanode Uuid a6556c12-2947-4038-9f7e-81235e5b9005) service to localhost/127.0.0.1:39435 2024-11-21T11:30:55,538 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T11:30:55,538 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T11:30:55,539 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/cluster_3bebd73c-2d26-d6bf-98f5-4cd57b784b47/data/data1/current/BP-562595054-172.17.0.2-1732188641609 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:30:55,539 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/cluster_3bebd73c-2d26-d6bf-98f5-4cd57b784b47/data/data2/current/BP-562595054-172.17.0.2-1732188641609 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:30:55,539 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T11:30:55,548 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:30:55,551 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T11:30:55,552 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T11:30:55,552 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T11:30:55,553 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T11:30:55,554 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d692efe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/hadoop.log.dir/,AVAILABLE} 2024-11-21T11:30:55,554 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3977d0c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T11:30:55,611 WARN [Thread-1337 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-21T11:30:55,614 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb4aa47c7dc637ba with lease ID 0xb9e18bb7b40bd0: from storage DS-af6105f5-1587-4c68-9201-41f9c784ce0a node DatanodeRegistration(127.0.0.1:44725, datanodeUuid=65542fea-e8e6-46b8-9728-fb6a63585a86, infoPort=37703, infoSecurePort=0, ipcPort=36699, storageInfo=lv=-57;cid=testClusterID;nsid=1406459020;c=1732188641609), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-21T11:30:55,614 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb4aa47c7dc637ba with lease ID 0xb9e18bb7b40bd0: from storage DS-61b1f713-d650-4f27-a8cf-eeab0a57e811 node DatanodeRegistration(127.0.0.1:44725, datanodeUuid=65542fea-e8e6-46b8-9728-fb6a63585a86, infoPort=37703, infoSecurePort=0, ipcPort=36699, storageInfo=lv=-57;cid=testClusterID;nsid=1406459020;c=1732188641609), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:30:55,670 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62f6e774{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/java.io.tmpdir/jetty-localhost-36577-hadoop-hdfs-3_4_1-tests_jar-_-any-3570582636779315902/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:30:55,671 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@173d3aba{HTTP/1.1, (http/1.1)}{localhost:36577} 2024-11-21T11:30:55,671 INFO [Time-limited test {}] server.Server(415): Started @162863ms 2024-11-21T11:30:55,672 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T11:30:55,775 WARN [Thread-1368 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-21T11:30:55,777 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x896f738518dac843 with lease ID 0xb9e18bb7b40bd1: from storage DS-7cd3b54e-0f43-477c-87e8-8f2682c18a81 node DatanodeRegistration(127.0.0.1:44503, datanodeUuid=a6556c12-2947-4038-9f7e-81235e5b9005, infoPort=44009, infoSecurePort=0, ipcPort=39289, storageInfo=lv=-57;cid=testClusterID;nsid=1406459020;c=1732188641609), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:30:55,777 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x896f738518dac843 with lease ID 0xb9e18bb7b40bd1: from storage DS-de712c2f-75b0-44c2-8e3c-7858a8c76bfe node DatanodeRegistration(127.0.0.1:44503, datanodeUuid=a6556c12-2947-4038-9f7e-81235e5b9005, infoPort=44009, infoSecurePort=0, ipcPort=39289, storageInfo=lv=-57;cid=testClusterID;nsid=1406459020;c=1732188641609), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:30:56,040 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:30:56,049 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:56,689 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-21T11:30:56,692 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-21T11:30:56,693 ERROR [FSHLog-0-hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b-prefix:7b462513bfc2,35257,1732188642395 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44069,DS-7cd3b54e-0f43-477c-87e8-8f2682c18a81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-21T11:30:56,693 WARN [FSHLog-0-hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b-prefix:7b462513bfc2,35257,1732188642395 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44069,DS-7cd3b54e-0f43-477c-87e8-8f2682c18a81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:56,694 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7b462513bfc2%2C35257%2C1732188642395:(num 1732188642777) roll requested 2024-11-21T11:30:56,694 INFO [regionserver/7b462513bfc2:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C35257%2C1732188642395.1732188656694 2024-11-21T11:30:56,699 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188642777 newFile=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188656694 2024-11-21T11:30:56,700 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:56,700 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:56,700 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:56,700 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:56,700 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:30:56,700 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188642777 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188656694 2024-11-21T11:30:56,701 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44069,DS-7cd3b54e-0f43-477c-87e8-8f2682c18a81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-21T11:30:56,701 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44069,DS-7cd3b54e-0f43-477c-87e8-8f2682c18a81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:30:56,701 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188642777 2024-11-21T11:30:56,701 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44009:44009),(127.0.0.1/127.0.0.1:37703:37703)] 2024-11-21T11:30:56,701 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188642777 is not closed yet, will try archiving it next time 2024-11-21T11:30:56,701 WARN [IPC Server handler 2 on default port 39435 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188642777 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-21T11:30:56,701 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188642777 after 0ms 2024-11-21T11:30:56,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44503 is added to blk_1073741833_1017 (size=1632) 2024-11-21T11:30:57,041 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:57,049 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:57,614 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-21T11:30:58,041 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:58,050 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:30:58,704 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-21T11:30:59,042 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:30:59,050 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:00,043 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:00,051 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:00,702 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188642777 after 4001ms 2024-11-21T11:31:00,707 WARN [ResponseProcessor for block BP-562595054-172.17.0.2-1732188641609:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-562595054-172.17.0.2-1732188641609:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:31:00,707 WARN [DataStreamer for file /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188656694 block BP-562595054-172.17.0.2-1732188641609:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-562595054-172.17.0.2-1732188641609:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44503,DS-7cd3b54e-0f43-477c-87e8-8f2682c18a81,DISK], DatanodeInfoWithStorage[127.0.0.1:44725,DS-af6105f5-1587-4c68-9201-41f9c784ce0a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44503,DS-7cd3b54e-0f43-477c-87e8-8f2682c18a81,DISK]) is bad. 2024-11-21T11:31:00,708 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1455043275_22 at /127.0.0.1:59918 [Receiving block BP-562595054-172.17.0.2-1732188641609:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:44503:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59918 dst: /127.0.0.1:44503 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T11:31:00,708 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1455043275_22 at /127.0.0.1:48254 [Receiving block BP-562595054-172.17.0.2-1732188641609:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:44725:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48254 dst: /127.0.0.1:44725 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:31:00,709 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62f6e774{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:31:00,710 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@173d3aba{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:31:00,710 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:31:00,710 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3977d0c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:31:00,710 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d692efe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/hadoop.log.dir/,STOPPED} 2024-11-21T11:31:00,711 WARN [BP-562595054-172.17.0.2-1732188641609 heartbeating to localhost/127.0.0.1:39435 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T11:31:00,711 WARN [BP-562595054-172.17.0.2-1732188641609 heartbeating to localhost/127.0.0.1:39435 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-562595054-172.17.0.2-1732188641609 (Datanode Uuid a6556c12-2947-4038-9f7e-81235e5b9005) service to localhost/127.0.0.1:39435 2024-11-21T11:31:00,711 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T11:31:00,712 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T11:31:00,712 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/cluster_3bebd73c-2d26-d6bf-98f5-4cd57b784b47/data/data1/current/BP-562595054-172.17.0.2-1732188641609 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:31:00,712 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/cluster_3bebd73c-2d26-d6bf-98f5-4cd57b784b47/data/data2/current/BP-562595054-172.17.0.2-1732188641609 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:31:00,713 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T11:31:00,720 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:31:00,723 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T11:31:00,724 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T11:31:00,724 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T11:31:00,724 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T11:31:00,724 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@571de0fd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/hadoop.log.dir/,AVAILABLE} 2024-11-21T11:31:00,725 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6614137b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T11:31:00,839 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@32c717fb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/java.io.tmpdir/jetty-localhost-44455-hadoop-hdfs-3_4_1-tests_jar-_-any-11571189493734154618/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:31:00,839 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6299f50b{HTTP/1.1, (http/1.1)}{localhost:44455} 2024-11-21T11:31:00,839 INFO [Time-limited test {}] server.Server(415): Started @168031ms 2024-11-21T11:31:00,840 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-21T11:31:00,861 WARN [ResponseProcessor for block BP-562595054-172.17.0.2-1732188641609:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-562595054-172.17.0.2-1732188641609:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:31:00,861 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1455043275_22 at /127.0.0.1:42270 [Receiving block BP-562595054-172.17.0.2-1732188641609:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:44725:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42270 dst: /127.0.0.1:44725 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T11:31:00,864 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@431e378c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:31:00,865 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1ace5703{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:31:00,865 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:31:00,865 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@f09ac85{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:31:00,865 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70644e5e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/hadoop.log.dir/,STOPPED} 2024-11-21T11:31:00,866 WARN [BP-562595054-172.17.0.2-1732188641609 heartbeating to localhost/127.0.0.1:39435 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T11:31:00,866 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-21T11:31:00,867 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T11:31:00,867 WARN [BP-562595054-172.17.0.2-1732188641609 heartbeating to localhost/127.0.0.1:39435 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-562595054-172.17.0.2-1732188641609 (Datanode Uuid 65542fea-e8e6-46b8-9728-fb6a63585a86) service to localhost/127.0.0.1:39435 2024-11-21T11:31:00,867 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/cluster_3bebd73c-2d26-d6bf-98f5-4cd57b784b47/data/data3/current/BP-562595054-172.17.0.2-1732188641609 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:31:00,867 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/cluster_3bebd73c-2d26-d6bf-98f5-4cd57b784b47/data/data4/current/BP-562595054-172.17.0.2-1732188641609 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:31:00,868 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T11:31:00,878 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:31:00,882 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T11:31:00,913 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T11:31:00,913 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T11:31:00,913 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T11:31:00,914 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17312068{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/hadoop.log.dir/,AVAILABLE} 2024-11-21T11:31:00,914 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e8914c8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T11:31:00,964 WARN [Thread-1411 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T11:31:00,967 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x115388acb21709fc with lease ID 0xb9e18bb7b40bd2: from storage DS-7cd3b54e-0f43-477c-87e8-8f2682c18a81 node DatanodeRegistration(127.0.0.1:33857, datanodeUuid=a6556c12-2947-4038-9f7e-81235e5b9005, infoPort=32939, infoSecurePort=0, ipcPort=42885, storageInfo=lv=-57;cid=testClusterID;nsid=1406459020;c=1732188641609), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:31:00,967 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x115388acb21709fc with lease ID 0xb9e18bb7b40bd2: from storage DS-de712c2f-75b0-44c2-8e3c-7858a8c76bfe node DatanodeRegistration(127.0.0.1:33857, datanodeUuid=a6556c12-2947-4038-9f7e-81235e5b9005, infoPort=32939, infoSecurePort=0, ipcPort=42885, storageInfo=lv=-57;cid=testClusterID;nsid=1406459020;c=1732188641609), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:31:01,033 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2ebbad67{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/java.io.tmpdir/jetty-localhost-40529-hadoop-hdfs-3_4_1-tests_jar-_-any-15117296651982378891/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:31:01,033 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@35936f2e{HTTP/1.1, (http/1.1)}{localhost:40529} 2024-11-21T11:31:01,033 INFO [Time-limited test {}] server.Server(415): Started @168225ms 2024-11-21T11:31:01,035 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-21T11:31:01,043 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:01,051 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:01,117 WARN [Thread-1442 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T11:31:01,119 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6be4b13981f86c62 with lease ID 0xb9e18bb7b40bd3: from storage DS-af6105f5-1587-4c68-9201-41f9c784ce0a node DatanodeRegistration(127.0.0.1:45237, datanodeUuid=65542fea-e8e6-46b8-9728-fb6a63585a86, infoPort=38825, infoSecurePort=0, ipcPort=35499, storageInfo=lv=-57;cid=testClusterID;nsid=1406459020;c=1732188641609), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:31:01,119 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6be4b13981f86c62 with lease ID 0xb9e18bb7b40bd3: from storage DS-61b1f713-d650-4f27-a8cf-eeab0a57e811 node DatanodeRegistration(127.0.0.1:45237, datanodeUuid=65542fea-e8e6-46b8-9728-fb6a63585a86, infoPort=38825, infoSecurePort=0, ipcPort=35499, storageInfo=lv=-57;cid=testClusterID;nsid=1406459020;c=1732188641609), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:31:02,044 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:02,052 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-21T11:31:02,052 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:31:02,054 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-21T11:31:02,055 ERROR [FSHLog-0-hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b-prefix:7b462513bfc2,35257,1732188642395 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44725,DS-af6105f5-1587-4c68-9201-41f9c784ce0a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:31:02,055 WARN [FSHLog-0-hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b-prefix:7b462513bfc2,35257,1732188642395 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44725,DS-af6105f5-1587-4c68-9201-41f9c784ce0a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-21T11:31:02,055 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7b462513bfc2%2C35257%2C1732188642395:(num 1732188656694) roll requested 2024-11-21T11:31:02,056 INFO [regionserver/7b462513bfc2:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C35257%2C1732188642395.1732188662055 2024-11-21T11:31:02,061 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188656694 newFile=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188662055 2024-11-21T11:31:02,061 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:02,061 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:02,061 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:02,061 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:02,061 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:02,062 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188656694 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188662055 2024-11-21T11:31:02,062 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44725,DS-af6105f5-1587-4c68-9201-41f9c784ce0a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:31:02,062 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44725,DS-af6105f5-1587-4c68-9201-41f9c784ce0a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-21T11:31:02,062 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188656694 2024-11-21T11:31:02,062 WARN [IPC Server handler 0 on default port 39435 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188656694 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-21T11:31:02,062 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38825:38825),(127.0.0.1/127.0.0.1:32939:32939)] 2024-11-21T11:31:02,062 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188656694 is not closed yet, will try archiving it next time 2024-11-21T11:31:02,062 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188656694 after 0ms 2024-11-21T11:31:03,044 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:03,052 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:04,045 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:04,053 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:04,064 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C35257%2C1732188642395.1732188664064 2024-11-21T11:31:04,069 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188662055 newFile=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188664064 2024-11-21T11:31:04,070 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:04,070 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:04,070 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:04,070 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:04,070 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:04,070 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188662055 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188664064 2024-11-21T11:31:04,071 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32939:32939),(127.0.0.1/127.0.0.1:38825:38825)] 2024-11-21T11:31:04,071 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188656694 is not closed yet, will try archiving it next time 2024-11-21T11:31:04,071 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188662055 is not closed yet, will try archiving it next time 2024-11-21T11:31:04,071 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188642777 2024-11-21T11:31:04,071 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188642777 2024-11-21T11:31:04,072 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on 
file=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188642777 after 1ms 2024-11-21T11:31:04,072 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188642777 2024-11-21T11:31:04,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33857 is added to blk_1073741838_1019 (size=1264) 2024-11-21T11:31:04,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45237 is added to blk_1073741838_1019 (size=1264) 2024-11-21T11:31:04,073 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188656694 is not closed yet, will try archiving it next time 2024-11-21T11:31:04,081 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1732188643687/Put/vlen=218/seqid=0] 2024-11-21T11:31:04,081 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1732188653376/Put/vlen=1045/seqid=0] 2024-11-21T11:31:04,082 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188642777 2024-11-21T11:31:04,082 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188656694 2024-11-21T11:31:04,082 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188656694 2024-11-21T11:31:04,082 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188656694 after 0ms 2024-11-21T11:31:04,082 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188656694 2024-11-21T11:31:04,085 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1732188656693/Put/vlen=1045/seqid=0] 2024-11-21T11:31:04,085 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1732188658705/Put/vlen=1045/seqid=0] 2024-11-21T11:31:04,085 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188656694 2024-11-21T11:31:04,085 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188662055 2024-11-21T11:31:04,085 INFO [Time-limited test {}] 
util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188662055 2024-11-21T11:31:04,085 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188662055 after 0ms 2024-11-21T11:31:04,085 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188662055 2024-11-21T11:31:04,088 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1732188662055/Put/vlen=1045/seqid=0] 2024-11-21T11:31:04,088 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188664064 2024-11-21T11:31:04,088 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188664064 2024-11-21T11:31:04,088 WARN [IPC Server handler 3 on default port 39435 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188664064 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-21T11:31:04,089 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188664064 after 1ms 2024-11-21T11:31:04,967 WARN [ResponseProcessor for block BP-562595054-172.17.0.2-1732188641609:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-562595054-172.17.0.2-1732188641609:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:31:04,967 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1901550705_22 at /127.0.0.1:50090 [Receiving block BP-562595054-172.17.0.2-1732188641609:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:33857:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50090 dst: /127.0.0.1:33857 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:33857 remote=/127.0.0.1:50090]. Total timeout mills is 60000, 59102 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:31:04,967 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1901550705_22 at /127.0.0.1:38068 [Receiving block BP-562595054-172.17.0.2-1732188641609:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:45237:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38068 dst: /127.0.0.1:45237 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T11:31:04,967 WARN [DataStreamer for file /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188664064 block BP-562595054-172.17.0.2-1732188641609:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-562595054-172.17.0.2-1732188641609:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33857,DS-7cd3b54e-0f43-477c-87e8-8f2682c18a81,DISK], DatanodeInfoWithStorage[127.0.0.1:45237,DS-af6105f5-1587-4c68-9201-41f9c784ce0a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33857,DS-7cd3b54e-0f43-477c-87e8-8f2682c18a81,DISK]) is bad. 2024-11-21T11:31:04,968 WARN [DataStreamer for file /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188664064 block BP-562595054-172.17.0.2-1732188641609:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-562595054-172.17.0.2-1732188641609:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:31:04,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33857 is added to blk_1073741839_1022 (size=85) 2024-11-21T11:31:04,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45237 is added to blk_1073741839_1022 (size=85) 2024-11-21T11:31:05,045 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:31:05,053 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:05,967 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-21T11:31:06,046 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:06,054 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:06,063 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188656694 after 4001ms 2024-11-21T11:31:07,047 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:07,054 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:08,047 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:08,054 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:08,089 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188664064 after 4001ms 2024-11-21T11:31:08,090 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188664064 2024-11-21T11:31:08,093 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188664064 2024-11-21T11:31:08,094 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 3ce34dc504a204da49ddededf18633a0 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-21T11:31:08,094 ERROR [FSHLog-0-hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b-prefix:7b462513bfc2,35257,1732188642395 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-562595054-172.17.0.2-1732188641609:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:31:08,094 WARN [FSHLog-0-hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b-prefix:7b462513bfc2,35257,1732188642395 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-562595054-172.17.0.2-1732188641609:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:31:08,095 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7b462513bfc2%2C35257%2C1732188642395:(num 1732188664064) roll requested 2024-11-21T11:31:08,095 INFO [regionserver/7b462513bfc2:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C35257%2C1732188642395.1732188668095 2024-11-21T11:31:08,100 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188664064 newFile=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188668095 2024-11-21T11:31:08,100 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:08,100 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:08,101 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:08,101 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:08,101 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:08,101 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188664064 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188668095 2024-11-21T11:31:08,101 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-562595054-172.17.0.2-1732188641609:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:31:08,101 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-562595054-172.17.0.2-1732188641609:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
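Note on the repeated "Failed invocation ... isFileClosed" warnings and the later "Recovered lease, attempt=N ... after NNNNms" lines: they come from RecoverLeaseFSUtils polling the NameNode while the old WAL's lease is reclaimed. A reduced sketch of that retry pattern follows, using only the two public DistributedFileSystem calls visible in the stack frames (recoverLease and isFileClosed); the helper name, timeout handling, and sleep interval are illustrative, not HBase's actual constants.

```java
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Hedged sketch of the lease-recovery polling loop implied by the traces above.
public final class LeaseRecoveryExample {
  static boolean waitForLeaseRecovery(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    // Ask the NameNode to start (or finish) lease recovery on the abandoned WAL.
    boolean recovered = dfs.recoverLease(wal);
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000); // poll, matching the "attempt=N ... after NNNNms" cadence in the log
      // isFileClosed() is the call that fails with "Filesystem closed" in several
      // attempts above, once the test has already shut that DFSClient down.
      recovered = dfs.isFileClosed(wal);
    }
    return recovered;
  }
}
```

Read this way, the "Caused by: java.io.IOException: Filesystem closed" entries indicate that the polling client itself was already closed, not that lease recovery on the NameNode failed; the files in question are eventually reported as recovered.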
2024-11-21T11:31:08,102 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188664064 2024-11-21T11:31:08,102 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188664064 after 0ms 2024-11-21T11:31:08,104 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.1732188664064 to hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/oldWALs/7b462513bfc2%2C35257%2C1732188642395.1732188664064 2024-11-21T11:31:08,105 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38825:38825),(127.0.0.1/127.0.0.1:32939:32939)] 2024-11-21T11:31:08,120 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/default/TestLogRolling-testLogRollOnPipelineRestart/3ce34dc504a204da49ddededf18633a0/.tmp/info/c12c42f6770e4d479a74f234d57fb259 is 1080, key is row1002/info:/1732188653376/Put/seqid=0 2024-11-21T11:31:08,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33857 is added to blk_1073741841_1024 (size=9270) 2024-11-21T11:31:08,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45237 is added to blk_1073741841_1024 (size=9270) 2024-11-21T11:31:08,128 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/default/TestLogRolling-testLogRollOnPipelineRestart/3ce34dc504a204da49ddededf18633a0/.tmp/info/c12c42f6770e4d479a74f234d57fb259 2024-11-21T11:31:08,134 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/default/TestLogRolling-testLogRollOnPipelineRestart/3ce34dc504a204da49ddededf18633a0/.tmp/info/c12c42f6770e4d479a74f234d57fb259 as hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/default/TestLogRolling-testLogRollOnPipelineRestart/3ce34dc504a204da49ddededf18633a0/info/c12c42f6770e4d479a74f234d57fb259 2024-11-21T11:31:08,140 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/default/TestLogRolling-testLogRollOnPipelineRestart/3ce34dc504a204da49ddededf18633a0/info/c12c42f6770e4d479a74f234d57fb259, entries=4, sequenceid=8, filesize=9.1 K 2024-11-21T11:31:08,141 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 3ce34dc504a204da49ddededf18633a0 in 47ms, sequenceid=8, compaction requested=false 2024-11-21T11:31:08,141 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 
3ce34dc504a204da49ddededf18633a0: 2024-11-21T11:31:08,141 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-21T11:31:08,141 ERROR [FSHLog-0-hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b-prefix:7b462513bfc2,35257,1732188642395.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44069,DS-7cd3b54e-0f43-477c-87e8-8f2682c18a81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:31:08,142 WARN [FSHLog-0-hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b-prefix:7b462513bfc2,35257,1732188642395.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44069,DS-7cd3b54e-0f43-477c-87e8-8f2682c18a81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:31:08,142 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7b462513bfc2%2C35257%2C1732188642395.meta:.meta(num 1732188643172) roll requested 2024-11-21T11:31:08,142 INFO [regionserver/7b462513bfc2:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C35257%2C1732188642395.meta.1732188668142.meta 2024-11-21T11:31:08,147 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:08,147 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:08,147 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:08,147 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:08,147 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:08,148 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.meta.1732188643172.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.meta.1732188668142.meta 2024-11-21T11:31:08,148 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44069,DS-7cd3b54e-0f43-477c-87e8-8f2682c18a81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:31:08,148 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44069,DS-7cd3b54e-0f43-477c-87e8-8f2682c18a81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:31:08,148 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.meta.1732188643172.meta 2024-11-21T11:31:08,149 WARN [IPC Server handler 1 on default port 39435 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.meta.1732188643172.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1026 for block blk_1073741834_1015 2024-11-21T11:31:08,149 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.meta.1732188643172.meta after 1ms 2024-11-21T11:31:08,152 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32939:32939),(127.0.0.1/127.0.0.1:38825:38825)] 2024-11-21T11:31:08,152 DEBUG [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.meta.1732188643172.meta is not closed yet, will try archiving it next time 2024-11-21T11:31:08,168 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/hbase/meta/1588230740/.tmp/info/68109ee06e084b3b9a09634a3b3e5750 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1732188643330.3ce34dc504a204da49ddededf18633a0./info:regioninfo/1732188643691/Put/seqid=0 2024-11-21T11:31:08,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45237 is added to blk_1073741843_1027 (size=7125) 2024-11-21T11:31:08,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33857 is added to blk_1073741843_1027 (size=7125) 2024-11-21T11:31:08,173 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/hbase/meta/1588230740/.tmp/info/68109ee06e084b3b9a09634a3b3e5750 2024-11-21T11:31:08,193 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/hbase/meta/1588230740/.tmp/ns/b594d6ca5a3944b78d3052024e47d86a is 43, key is default/ns:d/1732188643220/Put/seqid=0 2024-11-21T11:31:08,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33857 is added to blk_1073741844_1028 (size=5153) 2024-11-21T11:31:08,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45237 is added to blk_1073741844_1028 (size=5153) 2024-11-21T11:31:08,198 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/hbase/meta/1588230740/.tmp/ns/b594d6ca5a3944b78d3052024e47d86a 2024-11-21T11:31:08,217 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/hbase/meta/1588230740/.tmp/table/dbf4ee8de7184cb9936e0a4e596ea01e is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1732188643702/Put/seqid=0 2024-11-21T11:31:08,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45237 is added to blk_1073741845_1029 (size=5438) 2024-11-21T11:31:08,222 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33857 is added to blk_1073741845_1029 (size=5438) 2024-11-21T11:31:08,222 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/hbase/meta/1588230740/.tmp/table/dbf4ee8de7184cb9936e0a4e596ea01e 2024-11-21T11:31:08,228 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/hbase/meta/1588230740/.tmp/info/68109ee06e084b3b9a09634a3b3e5750 as hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/hbase/meta/1588230740/info/68109ee06e084b3b9a09634a3b3e5750 2024-11-21T11:31:08,233 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/hbase/meta/1588230740/info/68109ee06e084b3b9a09634a3b3e5750, entries=10, sequenceid=11, filesize=7.0 K 2024-11-21T11:31:08,234 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/hbase/meta/1588230740/.tmp/ns/b594d6ca5a3944b78d3052024e47d86a as hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/hbase/meta/1588230740/ns/b594d6ca5a3944b78d3052024e47d86a 2024-11-21T11:31:08,239 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/hbase/meta/1588230740/ns/b594d6ca5a3944b78d3052024e47d86a, entries=2, sequenceid=11, filesize=5.0 K 2024-11-21T11:31:08,240 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/hbase/meta/1588230740/.tmp/table/dbf4ee8de7184cb9936e0a4e596ea01e as hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/hbase/meta/1588230740/table/dbf4ee8de7184cb9936e0a4e596ea01e 2024-11-21T11:31:08,244 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/hbase/meta/1588230740/table/dbf4ee8de7184cb9936e0a4e596ea01e, entries=2, sequenceid=11, filesize=5.3 K 2024-11-21T11:31:08,246 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 104ms, sequenceid=11, compaction requested=false 2024-11-21T11:31:08,246 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-21T11:31:08,251 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T11:31:08,251 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
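The flush sequence recorded above follows a write-then-rename commit pattern: each store file is first written under the region's .tmp directory, then renamed into the column-family directory once complete, so readers never observe a half-written HFile. A rough sketch of that pattern against the plain Hadoop FileSystem API; paths and contents here are made up for the demo, and the real HRegionFileSystem code does considerably more bookkeeping:

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommitSketch {

  /** Writes a store file under the region's .tmp directory, then renames it into place. */
  static Path flushAndCommit(FileSystem fs, Path regionDir, String family, String fileName,
      byte[] data) throws IOException {
    Path tmpFile = new Path(regionDir, ".tmp/" + family + "/" + fileName);
    Path finalFile = new Path(regionDir, family + "/" + fileName);

    fs.mkdirs(tmpFile.getParent());
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.write(data); // in HBase this would be HFile blocks, not raw bytes
    }
    fs.mkdirs(finalFile.getParent());
    // Commit step: rename is the cheap "make it visible to readers" operation.
    if (!fs.rename(tmpFile, finalFile)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + finalFile);
    }
    return finalFile;
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.getLocal(new Configuration()); // local FS keeps the demo self-contained
    Path regionDir = new Path("/tmp/demo-region");
    Path committed = flushAndCommit(fs, regionDir, "info", "demo-storefile",
        "demo store file contents".getBytes(StandardCharsets.UTF_8));
    System.out.println("Committed " + committed + ", size=" + fs.getFileStatus(committed).getLen());
  }
}
```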
2024-11-21T11:31:08,251 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T11:31:08,251 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:31:08,251 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:31:08,251 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T11:31:08,252 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T11:31:08,252 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1525007896, stopped=false 2024-11-21T11:31:08,252 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7b462513bfc2,46423,1732188642353 2024-11-21T11:31:08,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-21T11:31:08,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35257-0x1013a4b06b20001, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-21T11:31:08,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:31:08,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35257-0x1013a4b06b20001, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:31:08,253 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T11:31:08,254 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T11:31:08,254 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T11:31:08,254 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:31:08,254 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7b462513bfc2,35257,1732188642395' ***** 2024-11-21T11:31:08,254 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T11:31:08,254 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:31:08,254 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35257-0x1013a4b06b20001, quorum=127.0.0.1:62913, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:31:08,254 INFO [RS:0;7b462513bfc2:35257 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T11:31:08,255 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T11:31:08,255 INFO [RS:0;7b462513bfc2:35257 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T11:31:08,255 INFO [RS:0;7b462513bfc2:35257 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T11:31:08,255 INFO [RS:0;7b462513bfc2:35257 {}] regionserver.HRegionServer(3091): Received CLOSE for 3ce34dc504a204da49ddededf18633a0 2024-11-21T11:31:08,255 INFO [RS:0;7b462513bfc2:35257 {}] regionserver.HRegionServer(959): stopping server 7b462513bfc2,35257,1732188642395 2024-11-21T11:31:08,255 INFO [RS:0;7b462513bfc2:35257 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T11:31:08,255 INFO [RS:0;7b462513bfc2:35257 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7b462513bfc2:35257. 
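The ZKWatcher entries above show how shutdown is propagated: the running marker znode /hbase/running is deleted, every watcher receives a NodeDeleted event, and each server then re-arms a watch on the now-absent node and begins stopping. A bare-bones sketch of that watch-and-react pattern with the plain ZooKeeper client; the quorum address, session timeout, and printed message are placeholders:

```java
import java.io.IOException;
import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatcherSketch {

  public static void main(String[] args) throws IOException, InterruptedException, KeeperException {
    final String runningZNode = "/hbase/running";
    final CountDownLatch shutdownRequested = new CountDownLatch(1);

    Watcher watcher = new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // Mirrors the log: a NodeDeleted event on /hbase/running means "stop".
        if (event.getType() == Event.EventType.NodeDeleted
            && runningZNode.equals(event.getPath())) {
          shutdownRequested.countDown();
        }
      }
    };

    // Connect string chosen to match the quorum in the log; any test quorum works.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:62913", 30_000, watcher);

    // exists() both checks the node and (re)arms a one-shot watch on it, which is
    // why the log shows "Set watcher on znode that does not yet exist, /hbase/running".
    zk.exists(runningZNode, true);

    shutdownRequested.await(); // block until the running znode is deleted
    System.out.println("Cluster shutdown requested; stopping server.");
    zk.close();
  }
}
```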
2024-11-21T11:31:08,255 DEBUG [RS:0;7b462513bfc2:35257 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T11:31:08,255 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 3ce34dc504a204da49ddededf18633a0, disabling compactions & flushes 2024-11-21T11:31:08,255 DEBUG [RS:0;7b462513bfc2:35257 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:31:08,255 INFO [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732188643330.3ce34dc504a204da49ddededf18633a0. 2024-11-21T11:31:08,255 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732188643330.3ce34dc504a204da49ddededf18633a0. 2024-11-21T11:31:08,255 INFO [RS:0;7b462513bfc2:35257 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T11:31:08,255 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732188643330.3ce34dc504a204da49ddededf18633a0. after waiting 0 ms 2024-11-21T11:31:08,255 INFO [RS:0;7b462513bfc2:35257 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T11:31:08,255 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732188643330.3ce34dc504a204da49ddededf18633a0. 2024-11-21T11:31:08,255 INFO [RS:0;7b462513bfc2:35257 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-21T11:31:08,255 INFO [RS:0;7b462513bfc2:35257 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T11:31:08,256 INFO [RS:0;7b462513bfc2:35257 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-21T11:31:08,256 DEBUG [RS:0;7b462513bfc2:35257 {}] regionserver.HRegionServer(1325): Online Regions={3ce34dc504a204da49ddededf18633a0=TestLogRolling-testLogRollOnPipelineRestart,,1732188643330.3ce34dc504a204da49ddededf18633a0., 1588230740=hbase:meta,,1.1588230740} 2024-11-21T11:31:08,256 DEBUG [RS:0;7b462513bfc2:35257 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 3ce34dc504a204da49ddededf18633a0 2024-11-21T11:31:08,256 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T11:31:08,256 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T11:31:08,256 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T11:31:08,256 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T11:31:08,256 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T11:31:08,260 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/default/TestLogRolling-testLogRollOnPipelineRestart/3ce34dc504a204da49ddededf18633a0/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-21T11:31:08,260 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-21T11:31:08,261 INFO [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732188643330.3ce34dc504a204da49ddededf18633a0. 2024-11-21T11:31:08,261 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 3ce34dc504a204da49ddededf18633a0: Waiting for close lock at 1732188668255Running coprocessor pre-close hooks at 1732188668255Disabling compacts and flushes for region at 1732188668255Disabling writes for close at 1732188668255Writing region close event to WAL at 1732188668256 (+1 ms)Running coprocessor post-close hooks at 1732188668260 (+4 ms)Closed at 1732188668260 2024-11-21T11:31:08,261 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732188643330.3ce34dc504a204da49ddededf18633a0. 
2024-11-21T11:31:08,261 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T11:31:08,261 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T11:31:08,261 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732188668256Running coprocessor pre-close hooks at 1732188668256Disabling compacts and flushes for region at 1732188668256Disabling writes for close at 1732188668256Writing region close event to WAL at 1732188668257 (+1 ms)Running coprocessor post-close hooks at 1732188668261 (+4 ms)Closed at 1732188668261 2024-11-21T11:31:08,261 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T11:31:08,293 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T11:31:08,293 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-21T11:31:08,294 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-21T11:31:08,456 INFO [RS:0;7b462513bfc2:35257 {}] regionserver.HRegionServer(976): stopping server 7b462513bfc2,35257,1732188642395; all regions closed. 2024-11-21T11:31:08,457 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:08,457 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:08,457 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:08,457 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:08,457 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:08,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45237 is added to blk_1073741842_1025 (size=825) 2024-11-21T11:31:08,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33857 is added to blk_1073741842_1025 (size=825) 2024-11-21T11:31:08,644 INFO [regionserver/7b462513bfc2:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-21T11:31:08,644 INFO [regionserver/7b462513bfc2:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-21T11:31:08,645 INFO [regionserver/7b462513bfc2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T11:31:09,048 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:09,055 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:10,048 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:10,055 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:11,049 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:11,056 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:12,050 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:12,056 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:12,120 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1015: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
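The repeating WARNs above ("Failed invocation ... InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed") come from RecoverLeaseFSUtils probing isFileClosed through reflection; once the test has shut the DFSClient down, every reflective probe fails the same way, once per retry. A small sketch of that call shape, showing why the logged exception is an InvocationTargetException wrapping the real IOException; the helper name is invented for illustration and the real utility differs in details:

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReflectiveIsFileClosedSketch {

  /**
   * Calls isFileClosed(Path) reflectively, so the same code can run against
   * FileSystem implementations that do not expose the method at all.
   * Returns null when the method is missing or the probe itself failed.
   */
  static Boolean isFileClosedReflectively(FileSystem fs, Path path) {
    try {
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return null; // this FileSystem has no usable isFileClosed
    } catch (InvocationTargetException e) {
      // This is the case in the log: the real failure ("Filesystem closed")
      // arrives wrapped, so the WARN prints the wrapper and then the cause.
      System.err.println("isFileClosed probe failed: " + e.getCause());
      return null;
    }
  }

  public static void main(String[] args) throws IOException {
    FileSystem local = FileSystem.getLocal(new Configuration());
    // The local filesystem is not expected to implement isFileClosed,
    // so this should take the "method missing" branch and print null.
    System.out.println(isFileClosedReflectively(local, new Path("/tmp/some-file")));
  }
}
```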
2024-11-21T11:31:12,149 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.meta.1732188643172.meta after 4001ms 2024-11-21T11:31:12,150 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/WALs/7b462513bfc2,35257,1732188642395/7b462513bfc2%2C35257%2C1732188642395.meta.1732188643172.meta to hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/oldWALs/7b462513bfc2%2C35257%2C1732188642395.meta.1732188643172.meta 2024-11-21T11:31:12,152 DEBUG [RS:0;7b462513bfc2:35257 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/oldWALs 2024-11-21T11:31:12,152 INFO [RS:0;7b462513bfc2:35257 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7b462513bfc2%2C35257%2C1732188642395.meta:.meta(num 1732188668142) 2024-11-21T11:31:12,153 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:12,153 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:12,153 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:12,153 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:12,153 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:12,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33857 is added to blk_1073741840_1023 (size=1162) 2024-11-21T11:31:12,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45237 is added to blk_1073741840_1023 (size=1162) 2024-11-21T11:31:12,159 DEBUG [RS:0;7b462513bfc2:35257 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/oldWALs 2024-11-21T11:31:12,159 INFO [RS:0;7b462513bfc2:35257 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7b462513bfc2%2C35257%2C1732188642395:(num 1732188668095) 2024-11-21T11:31:12,159 DEBUG [RS:0;7b462513bfc2:35257 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:31:12,159 INFO [RS:0;7b462513bfc2:35257 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T11:31:12,159 INFO [RS:0;7b462513bfc2:35257 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T11:31:12,160 INFO [RS:0;7b462513bfc2:35257 {}] hbase.ChoreService(370): Chore service for: regionserver/7b462513bfc2:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-21T11:31:12,160 INFO [RS:0;7b462513bfc2:35257 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T11:31:12,160 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
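The entries at 11:31:08 ("Failed to recover lease, attempt=0 ... after 1ms") and 11:31:12 ("Recovered lease, attempt=1 ... after 4001ms") show the shape of WAL lease recovery: ask the NameNode to recover the lease, and if recovery is still in progress, back off and retry until the file is reported closed or a deadline passes. A simplified sketch of that loop against the DistributedFileSystem API; the timeout and pause parameters are arbitrary, and the real RecoverLeaseFSUtils logic is more involved:

```java
import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {

  /**
   * Repeatedly asks the NameNode to recover the lease on a WAL file until it
   * reports the file closed, or the overall timeout expires.
   */
  static boolean recoverLease(DistributedFileSystem dfs, Path wal,
      long timeoutMs, long pauseMs) throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    for (int attempt = 0; System.currentTimeMillis() < deadline; attempt++) {
      long start = System.currentTimeMillis();
      // recoverLease returns true once the file is closed and safe to reuse.
      boolean recovered = dfs.recoverLease(wal);
      long elapsed = System.currentTimeMillis() - start;
      if (recovered) {
        System.out.println("Recovered lease, attempt=" + attempt
            + " on file=" + wal + " after " + elapsed + "ms");
        return true;
      }
      System.out.println("Failed to recover lease, attempt=" + attempt
          + " on file=" + wal + " after " + elapsed + "ms");
      Thread.sleep(pauseMs); // block recovery is asynchronous; give it time
    }
    return false;
  }
}
```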
2024-11-21T11:31:12,160 INFO [RS:0;7b462513bfc2:35257 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35257 2024-11-21T11:31:12,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-21T11:31:12,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35257-0x1013a4b06b20001, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7b462513bfc2,35257,1732188642395 2024-11-21T11:31:12,162 INFO [RS:0;7b462513bfc2:35257 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T11:31:12,163 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7b462513bfc2,35257,1732188642395] 2024-11-21T11:31:12,165 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7b462513bfc2,35257,1732188642395 already deleted, retry=false 2024-11-21T11:31:12,165 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7b462513bfc2,35257,1732188642395 expired; onlineServers=0 2024-11-21T11:31:12,165 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7b462513bfc2,46423,1732188642353' ***** 2024-11-21T11:31:12,165 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T11:31:12,165 INFO [M:0;7b462513bfc2:46423 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T11:31:12,165 INFO [M:0;7b462513bfc2:46423 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T11:31:12,165 DEBUG [M:0;7b462513bfc2:46423 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T11:31:12,165 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-21T11:31:12,165 DEBUG [M:0;7b462513bfc2:46423 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T11:31:12,166 DEBUG [master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.small.0-1732188642569 {}] cleaner.HFileCleaner(306): Exit Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.small.0-1732188642569,5,FailOnTimeoutGroup] 2024-11-21T11:31:12,166 DEBUG [master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.large.0-1732188642568 {}] cleaner.HFileCleaner(306): Exit Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.large.0-1732188642568,5,FailOnTimeoutGroup] 2024-11-21T11:31:12,166 INFO [M:0;7b462513bfc2:46423 {}] hbase.ChoreService(370): Chore service for: master/7b462513bfc2:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T11:31:12,166 INFO [M:0;7b462513bfc2:46423 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T11:31:12,166 DEBUG [M:0;7b462513bfc2:46423 {}] master.HMaster(1795): Stopping service threads 2024-11-21T11:31:12,166 INFO [M:0;7b462513bfc2:46423 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T11:31:12,166 INFO [M:0;7b462513bfc2:46423 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T11:31:12,166 INFO [M:0;7b462513bfc2:46423 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T11:31:12,166 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-21T11:31:12,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-21T11:31:12,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:31:12,167 DEBUG [M:0;7b462513bfc2:46423 {}] zookeeper.ZKUtil(347): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-21T11:31:12,167 WARN [M:0;7b462513bfc2:46423 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-21T11:31:12,167 INFO [M:0;7b462513bfc2:46423 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/.lastflushedseqids 2024-11-21T11:31:12,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45237 is added to blk_1073741846_1030 (size=130) 2024-11-21T11:31:12,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33857 is added to blk_1073741846_1030 (size=130) 2024-11-21T11:31:12,173 INFO [M:0;7b462513bfc2:46423 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T11:31:12,173 INFO [M:0;7b462513bfc2:46423 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T11:31:12,173 DEBUG [M:0;7b462513bfc2:46423 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T11:31:12,173 INFO [M:0;7b462513bfc2:46423 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:31:12,173 DEBUG [M:0;7b462513bfc2:46423 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:31:12,173 DEBUG [M:0;7b462513bfc2:46423 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T11:31:12,173 DEBUG [M:0;7b462513bfc2:46423 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:31:12,173 INFO [M:0;7b462513bfc2:46423 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-11-21T11:31:12,174 ERROR [FSHLog-0-hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData-prefix:7b462513bfc2,46423,1732188642353 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44069,DS-7cd3b54e-0f43-477c-87e8-8f2682c18a81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:31:12,174 WARN [FSHLog-0-hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData-prefix:7b462513bfc2,46423,1732188642353 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44069,DS-7cd3b54e-0f43-477c-87e8-8f2682c18a81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-21T11:31:12,174 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 7b462513bfc2%2C46423%2C1732188642353:(num 1732188642481) roll requested 2024-11-21T11:31:12,174 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C46423%2C1732188642353.1732188672174 2024-11-21T11:31:12,178 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:12,178 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:12,178 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:12,178 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:12,178 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:12,179 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/WALs/7b462513bfc2,46423,1732188642353/7b462513bfc2%2C46423%2C1732188642353.1732188642481 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/WALs/7b462513bfc2,46423,1732188642353/7b462513bfc2%2C46423%2C1732188642353.1732188672174 2024-11-21T11:31:12,179 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44069,DS-7cd3b54e-0f43-477c-87e8-8f2682c18a81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-21T11:31:12,179 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44069,DS-7cd3b54e-0f43-477c-87e8-8f2682c18a81,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
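When the append above fails with "All datanodes ... are bad", the WAL is rolled: a new writer is opened on a fresh pipeline, and closing the old writer (including its trailer write) is treated as best-effort and logged as non-fatal. Below is a highly simplified sketch of that roll-on-failure pattern, assuming one plain FSDataOutputStream per WAL file; the real AbstractFSWAL pipeline is considerably more elaborate.

    // Illustrative only: roll to a new writer when an append/sync fails.
    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class WalRollSketch {
      private final FileSystem fs;
      private final Path walDir;
      private FSDataOutputStream writer;

      public WalRollSketch(FileSystem fs, Path walDir) throws IOException {
        this.fs = fs;
        this.walDir = walDir;
        this.writer = openNewWriter();
      }

      private FSDataOutputStream openNewWriter() throws IOException {
        // New WAL file named by timestamp, as in the rolled file names above.
        return fs.create(new Path(walDir, "wal." + System.currentTimeMillis()));
      }

      public void append(byte[] entry) throws IOException {
        try {
          writer.write(entry);
          writer.hflush();                       // persist to the datanode pipeline
        } catch (IOException e) {
          FSDataOutputStream old = writer;
          writer = openNewWriter();              // roll: keep going on a fresh writer
          try {
            old.close();                         // "close old writer failed" is tolerated
          } catch (IOException closeError) {
            // non-fatal; lease recovery will finish the old file later
          }
          throw e;                               // surface the original failure to the caller
        }
      }
    }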
2024-11-21T11:31:12,179 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/WALs/7b462513bfc2,46423,1732188642353/7b462513bfc2%2C46423%2C1732188642353.1732188642481 2024-11-21T11:31:12,179 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38825:38825),(127.0.0.1/127.0.0.1:32939:32939)] 2024-11-21T11:31:12,179 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/WALs/7b462513bfc2,46423,1732188642353/7b462513bfc2%2C46423%2C1732188642353.1732188642481 is not closed yet, will try archiving it next time 2024-11-21T11:31:12,179 WARN [IPC Server handler 0 on default port 39435 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/WALs/7b462513bfc2,46423,1732188642353/7b462513bfc2%2C46423%2C1732188642353.1732188642481 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-11-21T11:31:12,180 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/WALs/7b462513bfc2,46423,1732188642353/7b462513bfc2%2C46423%2C1732188642353.1732188642481 after 1ms 2024-11-21T11:31:12,194 DEBUG [M:0;7b462513bfc2:46423 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fd1f0fee24ad492c99bcab26eb37c475 is 82, key is hbase:meta,,1/info:regioninfo/1732188643203/Put/seqid=0 2024-11-21T11:31:12,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33857 is added to blk_1073741848_1033 (size=5672) 2024-11-21T11:31:12,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45237 is added to blk_1073741848_1033 (size=5672) 2024-11-21T11:31:12,199 INFO [M:0;7b462513bfc2:46423 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fd1f0fee24ad492c99bcab26eb37c475 2024-11-21T11:31:12,218 DEBUG [M:0;7b462513bfc2:46423 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/dd15818e41d34c108d51361117bc449b is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732188643706/Put/seqid=0 2024-11-21T11:31:12,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33857 is added to blk_1073741849_1034 (size=6118) 2024-11-21T11:31:12,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45237 is added to blk_1073741849_1034 (size=6118) 2024-11-21T11:31:12,223 INFO [M:0;7b462513bfc2:46423 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/dd15818e41d34c108d51361117bc449b 2024-11-21T11:31:12,241 DEBUG [M:0;7b462513bfc2:46423 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1e6fdd2eadb549c9859c7d26360785aa is 69, key is 7b462513bfc2,35257,1732188642395/rs:state/1732188642631/Put/seqid=0 2024-11-21T11:31:12,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45237 is added to blk_1073741850_1035 (size=5156) 2024-11-21T11:31:12,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33857 is added to blk_1073741850_1035 (size=5156) 2024-11-21T11:31:12,246 INFO [M:0;7b462513bfc2:46423 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1e6fdd2eadb549c9859c7d26360785aa 2024-11-21T11:31:12,263 DEBUG [M:0;7b462513bfc2:46423 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f0171ffbd6ca4ce79f3a73fc0ae272e9 is 52, key is load_balancer_on/state:d/1732188643325/Put/seqid=0 2024-11-21T11:31:12,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35257-0x1013a4b06b20001, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:31:12,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35257-0x1013a4b06b20001, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:31:12,263 INFO [RS:0;7b462513bfc2:35257 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T11:31:12,263 INFO [RS:0;7b462513bfc2:35257 {}] regionserver.HRegionServer(1031): Exiting; stopping=7b462513bfc2,35257,1732188642395; zookeeper connection closed. 
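The flush above writes each column family's snapshot under the region's .tmp directory first, and the "Committing ... as ..." lines further down publish those files into the store directories. Below is a minimal sketch of that write-then-rename commit using the generic FileSystem API; the directory layout and naming here are assumptions, and a real flush writes HFiles rather than raw bytes.

    // Illustrative only: write a store file under .tmp, then rename it into the family directory.
    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class FlushCommitSketch {
      /** Write data into .tmp/<family>/<file> and publish it under <family>/<file>. */
      public static Path commitStoreFile(FileSystem fs, Path regionDir, String family,
          String fileName, byte[] data) throws IOException {
        Path tmpFile = new Path(new Path(regionDir, ".tmp/" + family), fileName);
        Path storeFile = new Path(new Path(regionDir, family), fileName);
        try (var out = fs.create(tmpFile)) {
          out.write(data);                       // flush the snapshot (HFile encoding elided)
        }
        fs.mkdirs(storeFile.getParent());
        if (!fs.rename(tmpFile, storeFile)) {    // the "Committing ... as ..." step in the log
          throw new IOException("Failed to commit " + tmpFile + " as " + storeFile);
        }
        return storeFile;
      }
    }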
2024-11-21T11:31:12,264 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1306b873 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1306b873 2024-11-21T11:31:12,264 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T11:31:12,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45237 is added to blk_1073741851_1036 (size=5056) 2024-11-21T11:31:12,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33857 is added to blk_1073741851_1036 (size=5056) 2024-11-21T11:31:12,267 INFO [M:0;7b462513bfc2:46423 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f0171ffbd6ca4ce79f3a73fc0ae272e9 2024-11-21T11:31:12,272 DEBUG [M:0;7b462513bfc2:46423 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fd1f0fee24ad492c99bcab26eb37c475 as hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fd1f0fee24ad492c99bcab26eb37c475 2024-11-21T11:31:12,276 INFO [M:0;7b462513bfc2:46423 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fd1f0fee24ad492c99bcab26eb37c475, entries=8, sequenceid=56, filesize=5.5 K 2024-11-21T11:31:12,277 DEBUG [M:0;7b462513bfc2:46423 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/dd15818e41d34c108d51361117bc449b as hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/dd15818e41d34c108d51361117bc449b 2024-11-21T11:31:12,281 INFO [M:0;7b462513bfc2:46423 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/dd15818e41d34c108d51361117bc449b, entries=6, sequenceid=56, filesize=6.0 K 2024-11-21T11:31:12,282 DEBUG [M:0;7b462513bfc2:46423 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1e6fdd2eadb549c9859c7d26360785aa as hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1e6fdd2eadb549c9859c7d26360785aa 2024-11-21T11:31:12,287 INFO [M:0;7b462513bfc2:46423 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1e6fdd2eadb549c9859c7d26360785aa, entries=1, sequenceid=56, filesize=5.0 K 2024-11-21T11:31:12,288 DEBUG [M:0;7b462513bfc2:46423 
{}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f0171ffbd6ca4ce79f3a73fc0ae272e9 as hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f0171ffbd6ca4ce79f3a73fc0ae272e9 2024-11-21T11:31:12,292 INFO [M:0;7b462513bfc2:46423 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f0171ffbd6ca4ce79f3a73fc0ae272e9, entries=1, sequenceid=56, filesize=4.9 K 2024-11-21T11:31:12,294 INFO [M:0;7b462513bfc2:46423 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 121ms, sequenceid=56, compaction requested=false 2024-11-21T11:31:12,295 INFO [M:0;7b462513bfc2:46423 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:31:12,295 DEBUG [M:0;7b462513bfc2:46423 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732188672173Disabling compacts and flushes for region at 1732188672173Disabling writes for close at 1732188672173Obtaining lock to block concurrent updates at 1732188672173Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732188672173Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1732188672174 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732188672180 (+6 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732188672180Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732188672194 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732188672194Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732188672204 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732188672218 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732188672218Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732188672227 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732188672240 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732188672240Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732188672250 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732188672263 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732188672263Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d45c210: reopening flushed file at 1732188672271 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@22c88f58: reopening flushed file at 1732188672276 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3330a7f4: reopening flushed file at 1732188672282 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@46a81e99: reopening flushed file at 1732188672287 (+5 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 121ms, sequenceid=56, compaction requested=false at 1732188672294 (+7 ms)Writing region close event to WAL at 1732188672295 (+1 ms)Closed at 1732188672295 2024-11-21T11:31:12,296 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:12,296 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:12,296 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:12,296 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:12,296 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:12,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33857 is added to blk_1073741847_1031 (size=757) 2024-11-21T11:31:12,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45237 is added to blk_1073741847_1031 (size=757) 2024-11-21T11:31:12,336 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T11:31:13,050 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:13,057 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:13,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,273 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,275 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,277 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,277 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,277 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,280 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,786 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T11:31:13,787 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,787 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,788 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,788 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,802 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,802 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,802 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,803 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,803 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,803 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:13,808 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:14,051 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:14,057 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:15,051 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:15,057 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:15,120 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
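The recurring "Failed invocation ... InvocationTargetException ... Filesystem closed" warnings above come from probing DistributedFileSystem.isFileClosed through reflection after the test has already shut the DFS client down, so the reflective call only surfaces the wrapped IOException. Below is a small sketch of such a reflective probe with the InvocationTargetException unwrapped; it is only an illustration, not the RecoverLeaseFSUtils code.

    // Illustrative only: reflective probe for an optional filesystem capability.
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class IsFileClosedSketch {
      /** Returns true if the filesystem reports the file closed, false if unknown or the call failed. */
      public static boolean isFileClosed(FileSystem fs, Path file) {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);  // present on DistributedFileSystem
          return (Boolean) m.invoke(fs, file);
        } catch (NoSuchMethodException e) {
          return false;                    // this filesystem type does not expose the probe
        } catch (InvocationTargetException e) {
          // e.getCause() is the real failure, e.g. IOException("Filesystem closed") as in the log.
          return false;
        } catch (ReflectiveOperationException e) {
          return false;
        }
      }
    }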
2024-11-21T11:31:16,052 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:16,058 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:16,180 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/WALs/7b462513bfc2,46423,1732188642353/7b462513bfc2%2C46423%2C1732188642353.1732188642481 after 4001ms 2024-11-21T11:31:16,181 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/WALs/7b462513bfc2,46423,1732188642353/7b462513bfc2%2C46423%2C1732188642353.1732188642481 to hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/oldWALs/7b462513bfc2%2C46423%2C1732188642353.1732188642481 2024-11-21T11:31:16,184 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/MasterData/oldWALs/7b462513bfc2%2C46423%2C1732188642353.1732188642481 to hdfs://localhost:39435/user/jenkins/test-data/1f367572-b712-af8a-7157-93d45a0d2b6b/oldWALs/7b462513bfc2%2C46423%2C1732188642353.1732188642481$masterlocalwal$ 2024-11-21T11:31:16,184 INFO [M:0;7b462513bfc2:46423 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-21T11:31:16,184 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-21T11:31:16,184 INFO [M:0;7b462513bfc2:46423 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46423 2024-11-21T11:31:16,184 INFO [M:0;7b462513bfc2:46423 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T11:31:16,286 INFO [M:0;7b462513bfc2:46423 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T11:31:16,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:31:16,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46423-0x1013a4b06b20000, quorum=127.0.0.1:62913, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:31:16,289 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2ebbad67{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:31:16,289 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@35936f2e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:31:16,289 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:31:16,289 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e8914c8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:31:16,289 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17312068{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/hadoop.log.dir/,STOPPED} 2024-11-21T11:31:16,290 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T11:31:16,290 WARN [BP-562595054-172.17.0.2-1732188641609 heartbeating to localhost/127.0.0.1:39435 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T11:31:16,290 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T11:31:16,290 WARN [BP-562595054-172.17.0.2-1732188641609 heartbeating to localhost/127.0.0.1:39435 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-562595054-172.17.0.2-1732188641609 (Datanode Uuid 65542fea-e8e6-46b8-9728-fb6a63585a86) service to localhost/127.0.0.1:39435 2024-11-21T11:31:16,291 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/cluster_3bebd73c-2d26-d6bf-98f5-4cd57b784b47/data/data3/current/BP-562595054-172.17.0.2-1732188641609 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:31:16,291 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/cluster_3bebd73c-2d26-d6bf-98f5-4cd57b784b47/data/data4/current/BP-562595054-172.17.0.2-1732188641609 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:31:16,292 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T11:31:16,293 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@32c717fb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:31:16,294 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6299f50b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:31:16,294 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:31:16,294 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6614137b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:31:16,294 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@571de0fd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/hadoop.log.dir/,STOPPED} 2024-11-21T11:31:16,295 WARN [BP-562595054-172.17.0.2-1732188641609 heartbeating to localhost/127.0.0.1:39435 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T11:31:16,295 WARN [BP-562595054-172.17.0.2-1732188641609 heartbeating to localhost/127.0.0.1:39435 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-562595054-172.17.0.2-1732188641609 (Datanode Uuid a6556c12-2947-4038-9f7e-81235e5b9005) service to localhost/127.0.0.1:39435 2024-11-21T11:31:16,296 WARN 
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/cluster_3bebd73c-2d26-d6bf-98f5-4cd57b784b47/data/data1/current/BP-562595054-172.17.0.2-1732188641609 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:31:16,296 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/cluster_3bebd73c-2d26-d6bf-98f5-4cd57b784b47/data/data2/current/BP-562595054-172.17.0.2-1732188641609 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:31:16,296 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-21T11:31:16,296 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T11:31:16,296 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T11:31:16,301 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@228e200c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T11:31:16,302 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3045a903{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:31:16,302 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:31:16,302 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c23e5e9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:31:16,302 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10e56c5a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/hadoop.log.dir/,STOPPED} 2024-11-21T11:31:16,308 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-21T11:31:16,325 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-21T11:31:16,333 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=180 (was 154) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:39435 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39435 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:39435 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:39435 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39435 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39435 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39435 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:39435 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 452) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=131 (was 226), ProcessCount=11 (was 11), AvailableMemoryMB=4988 (was 5165) 2024-11-21T11:31:16,340 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=180, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=131, ProcessCount=11, AvailableMemoryMB=4988 2024-11-21T11:31:16,340 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T11:31:16,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/hadoop.log.dir so I do NOT create it in target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6 2024-11-21T11:31:16,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bcebb39c-5f79-f63a-f3e1-ccda123e77f2/hadoop.tmp.dir so I do NOT create it in target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6 2024-11-21T11:31:16,341 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/cluster_73d46690-d356-909e-21c4-af3bb94aa751, deleteOnExit=true 2024-11-21T11:31:16,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T11:31:16,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/test.cache.data in system properties and HBase conf 2024-11-21T11:31:16,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T11:31:16,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/hadoop.log.dir in system properties and HBase conf 2024-11-21T11:31:16,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T11:31:16,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-21T11:31:16,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T11:31:16,341 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file 
system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-21T11:31:16,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T11:31:16,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T11:31:16,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T11:31:16,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T11:31:16,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T11:31:16,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T11:31:16,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T11:31:16,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T11:31:16,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T11:31:16,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/nfs.dump.dir in system properties and HBase conf 2024-11-21T11:31:16,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/java.io.tmpdir in system properties and HBase conf 2024-11-21T11:31:16,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T11:31:16,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T11:31:16,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T11:31:16,355 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-21T11:31:16,424 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:31:16,428 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T11:31:16,429 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T11:31:16,429 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T11:31:16,429 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T11:31:16,430 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:31:16,430 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d6dc4f9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/hadoop.log.dir/,AVAILABLE} 2024-11-21T11:31:16,431 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@294894f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T11:31:16,544 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7e11e1e0{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/java.io.tmpdir/jetty-localhost-46413-hadoop-hdfs-3_4_1-tests_jar-_-any-7725736889035035465/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T11:31:16,544 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1159c3f2{HTTP/1.1, (http/1.1)}{localhost:46413} 2024-11-21T11:31:16,544 INFO [Time-limited test {}] server.Server(415): Started @183736ms 2024-11-21T11:31:16,557 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-21T11:31:16,615 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:31:16,618 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T11:31:16,619 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T11:31:16,619 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T11:31:16,619 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T11:31:16,619 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5838a3fd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/hadoop.log.dir/,AVAILABLE} 2024-11-21T11:31:16,620 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b93cee9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T11:31:16,741 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1fa04e54{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/java.io.tmpdir/jetty-localhost-38045-hadoop-hdfs-3_4_1-tests_jar-_-any-5155173335271251886/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:31:16,741 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@68744dfe{HTTP/1.1, (http/1.1)}{localhost:38045} 2024-11-21T11:31:16,741 INFO [Time-limited test {}] server.Server(415): Started @183933ms 2024-11-21T11:31:16,742 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T11:31:16,773 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:31:16,775 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T11:31:16,776 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T11:31:16,776 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T11:31:16,776 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T11:31:16,777 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@df163d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/hadoop.log.dir/,AVAILABLE} 2024-11-21T11:31:16,777 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1367dc96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T11:31:16,853 WARN [Thread-1635 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/cluster_73d46690-d356-909e-21c4-af3bb94aa751/data/data1/current/BP-1453964250-172.17.0.2-1732188676372/current, will proceed with Du for space computation calculation, 2024-11-21T11:31:16,853 WARN [Thread-1636 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/cluster_73d46690-d356-909e-21c4-af3bb94aa751/data/data2/current/BP-1453964250-172.17.0.2-1732188676372/current, will proceed with Du for space computation calculation, 2024-11-21T11:31:16,870 WARN [Thread-1614 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-21T11:31:16,873 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7b0a29ff305ab1f5 with lease ID 0x6eca104bcfffafe9: Processing first storage report for DS-25ed6623-a8c4-44ce-930a-0cf6262b0307 from datanode DatanodeRegistration(127.0.0.1:40039, datanodeUuid=189ec7dc-c409-4486-8836-5ac4e5bdd230, infoPort=41731, infoSecurePort=0, ipcPort=46741, storageInfo=lv=-57;cid=testClusterID;nsid=657694560;c=1732188676372) 2024-11-21T11:31:16,873 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7b0a29ff305ab1f5 with lease ID 0x6eca104bcfffafe9: from storage DS-25ed6623-a8c4-44ce-930a-0cf6262b0307 node DatanodeRegistration(127.0.0.1:40039, datanodeUuid=189ec7dc-c409-4486-8836-5ac4e5bdd230, infoPort=41731, infoSecurePort=0, ipcPort=46741, storageInfo=lv=-57;cid=testClusterID;nsid=657694560;c=1732188676372), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:31:16,873 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7b0a29ff305ab1f5 with lease ID 0x6eca104bcfffafe9: Processing first storage report for DS-f30833e7-196b-41b9-9200-43161921d757 from datanode DatanodeRegistration(127.0.0.1:40039, datanodeUuid=189ec7dc-c409-4486-8836-5ac4e5bdd230, infoPort=41731, infoSecurePort=0, ipcPort=46741, storageInfo=lv=-57;cid=testClusterID;nsid=657694560;c=1732188676372) 2024-11-21T11:31:16,873 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7b0a29ff305ab1f5 with lease ID 0x6eca104bcfffafe9: from storage DS-f30833e7-196b-41b9-9200-43161921d757 node DatanodeRegistration(127.0.0.1:40039, datanodeUuid=189ec7dc-c409-4486-8836-5ac4e5bdd230, infoPort=41731, infoSecurePort=0, ipcPort=46741, storageInfo=lv=-57;cid=testClusterID;nsid=657694560;c=1732188676372), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:31:16,895 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@206f042f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/java.io.tmpdir/jetty-localhost-46173-hadoop-hdfs-3_4_1-tests_jar-_-any-16040522953496799258/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:31:16,895 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@374dfdaf{HTTP/1.1, (http/1.1)}{localhost:46173} 2024-11-21T11:31:16,895 INFO [Time-limited test {}] server.Server(415): Started @184087ms 2024-11-21T11:31:16,896 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
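
[Editor's note] The entries above record the test's embedded HDFS coming up for this run: two datanodes start, their storages (data1-data4) are scanned, and the namenode's block manager processes their first storage reports. For orientation only, here is a minimal, hedged sketch of how such an in-process DFS is typically stood up in Hadoop test code using the public MiniDFSCluster test API; this is an illustration under that assumption, not the exact code path HBaseTestingUtil runs internally.

    // Illustrative sketch only: a 2-datanode in-process HDFS, analogous to what the
    // log above shows the test utility bringing up. Uses only the public
    // MiniDFSCluster test API from hadoop-hdfs tests; paths and class name are made up.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniDfsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster dfs = new MiniDFSCluster.Builder(conf)
            .numDataNodes(2)   // two datanodes, matching the two registrations logged above
            .build();
        dfs.waitActive();      // block until the datanodes have reported in to the namenode
        FileSystem fs = dfs.getFileSystem();
        fs.mkdirs(new Path("/user/jenkins"));
        dfs.shutdown();        // produces shutdown warnings like the ones seen earlier in the log
      }
    }
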
2024-11-21T11:31:17,011 WARN [Thread-1661 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/cluster_73d46690-d356-909e-21c4-af3bb94aa751/data/data3/current/BP-1453964250-172.17.0.2-1732188676372/current, will proceed with Du for space computation calculation, 2024-11-21T11:31:17,011 WARN [Thread-1662 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/cluster_73d46690-d356-909e-21c4-af3bb94aa751/data/data4/current/BP-1453964250-172.17.0.2-1732188676372/current, will proceed with Du for space computation calculation, 2024-11-21T11:31:17,034 WARN [Thread-1650 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T11:31:17,036 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa62d0f3d7d3efeef with lease ID 0x6eca104bcfffafea: Processing first storage report for DS-4535c2d1-6b3e-4e1d-800b-429fc264c956 from datanode DatanodeRegistration(127.0.0.1:42157, datanodeUuid=2eaf071e-39fb-4282-afd5-45759fd28ad5, infoPort=41993, infoSecurePort=0, ipcPort=35697, storageInfo=lv=-57;cid=testClusterID;nsid=657694560;c=1732188676372) 2024-11-21T11:31:17,037 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa62d0f3d7d3efeef with lease ID 0x6eca104bcfffafea: from storage DS-4535c2d1-6b3e-4e1d-800b-429fc264c956 node DatanodeRegistration(127.0.0.1:42157, datanodeUuid=2eaf071e-39fb-4282-afd5-45759fd28ad5, infoPort=41993, infoSecurePort=0, ipcPort=35697, storageInfo=lv=-57;cid=testClusterID;nsid=657694560;c=1732188676372), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:31:17,037 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa62d0f3d7d3efeef with lease ID 0x6eca104bcfffafea: Processing first storage report for DS-62cad6e6-2bdf-4042-a952-edd73f5b8725 from datanode DatanodeRegistration(127.0.0.1:42157, datanodeUuid=2eaf071e-39fb-4282-afd5-45759fd28ad5, infoPort=41993, infoSecurePort=0, ipcPort=35697, storageInfo=lv=-57;cid=testClusterID;nsid=657694560;c=1732188676372) 2024-11-21T11:31:17,037 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa62d0f3d7d3efeef with lease ID 0x6eca104bcfffafea: from storage DS-62cad6e6-2bdf-4042-a952-edd73f5b8725 node DatanodeRegistration(127.0.0.1:42157, datanodeUuid=2eaf071e-39fb-4282-afd5-45759fd28ad5, infoPort=41993, infoSecurePort=0, ipcPort=35697, storageInfo=lv=-57;cid=testClusterID;nsid=657694560;c=1732188676372), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:31:17,052 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:17,058 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:17,119 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6 2024-11-21T11:31:17,122 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/cluster_73d46690-d356-909e-21c4-af3bb94aa751/zookeeper_0, clientPort=63013, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/cluster_73d46690-d356-909e-21c4-af3bb94aa751/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/cluster_73d46690-d356-909e-21c4-af3bb94aa751/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-21T11:31:17,123 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63013 2024-11-21T11:31:17,123 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:31:17,124 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:31:17,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741825_1001 (size=7) 2024-11-21T11:31:17,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741825_1001 (size=7) 2024-11-21T11:31:17,134 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2 with version=8 2024-11-21T11:31:17,134 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/hbase-staging 2024-11-21T11:31:17,136 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7b462513bfc2:0 server-side Connection retries=45 2024-11-21T11:31:17,136 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T11:31:17,136 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T11:31:17,136 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T11:31:17,136 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T11:31:17,136 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T11:31:17,136 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T11:31:17,136 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T11:31:17,137 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33889 2024-11-21T11:31:17,138 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33889 connecting to ZooKeeper ensemble=127.0.0.1:63013 2024-11-21T11:31:17,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:338890x0, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T11:31:17,144 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33889-0x1013a4b8e910000 connected 2024-11-21T11:31:17,158 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:31:17,159 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:31:17,161 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:31:17,161 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2, hbase.cluster.distributed=false 2024-11-21T11:31:17,163 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-21T11:31:17,163 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33889 2024-11-21T11:31:17,163 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33889 2024-11-21T11:31:17,164 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33889 2024-11-21T11:31:17,164 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33889 2024-11-21T11:31:17,164 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33889 2024-11-21T11:31:17,179 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7b462513bfc2:0 server-side Connection retries=45 2024-11-21T11:31:17,179 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T11:31:17,179 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T11:31:17,179 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T11:31:17,180 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T11:31:17,180 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T11:31:17,180 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T11:31:17,180 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T11:31:17,180 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46293 2024-11-21T11:31:17,182 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46293 connecting to ZooKeeper ensemble=127.0.0.1:63013 2024-11-21T11:31:17,182 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:31:17,184 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:31:17,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:462930x0, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T11:31:17,188 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:462930x0, quorum=127.0.0.1:63013, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:31:17,188 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46293-0x1013a4b8e910001 connected 2024-11-21T11:31:17,188 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T11:31:17,189 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T11:31:17,189 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46293-0x1013a4b8e910001, quorum=127.0.0.1:63013, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
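
[Editor's note] The RpcExecutor entries above show each server (master on port 33889, regionserver on port 46293) instantiating its call queues (default, priority read/write, replication, metaPriority) with handlerCount=3 and maxQueueLength=30. The sketch below lists the standard HBase configuration keys that conventionally size those pools; that the logged values came from exactly these keys in this test setup is an assumption on the editor's part, not something the log itself states.

    // Hedged illustration: common HBase properties for RPC handler and call-queue sizing.
    // The mapping of the logged handlerCount=3 / maxQueueLength=30 to these keys is an
    // assumption; the log does not show the test's configuration code.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcQueueConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.regionserver.handler.count", 3);           // handlers per default queue
        conf.setInt("hbase.ipc.server.max.callqueue.length", 30);     // max queued calls per queue
        conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f); // split priority read/write queues
        System.out.println(conf.getInt("hbase.regionserver.handler.count", -1));
      }
    }
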
2024-11-21T11:31:17,190 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46293-0x1013a4b8e910001, quorum=127.0.0.1:63013, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-21T11:31:17,190 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46293 2024-11-21T11:31:17,190 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46293 2024-11-21T11:31:17,191 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46293 2024-11-21T11:31:17,191 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46293 2024-11-21T11:31:17,191 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46293 2024-11-21T11:31:17,202 DEBUG [M:0;7b462513bfc2:33889 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7b462513bfc2:33889 2024-11-21T11:31:17,203 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7b462513bfc2,33889,1732188677135 2024-11-21T11:31:17,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46293-0x1013a4b8e910001, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T11:31:17,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T11:31:17,211 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7b462513bfc2,33889,1732188677135 2024-11-21T11:31:17,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46293-0x1013a4b8e910001, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-21T11:31:17,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:31:17,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46293-0x1013a4b8e910001, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:31:17,213 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-21T11:31:17,213 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7b462513bfc2,33889,1732188677135 from backup master directory 2024-11-21T11:31:17,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7b462513bfc2,33889,1732188677135 2024-11-21T11:31:17,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46293-0x1013a4b8e910001, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T11:31:17,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T11:31:17,214 WARN [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T11:31:17,214 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7b462513bfc2,33889,1732188677135 2024-11-21T11:31:17,219 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/hbase.id] with ID: 0b1f9a07-9b9c-4dcb-8579-10b1b58e24b6 2024-11-21T11:31:17,219 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/.tmp/hbase.id 2024-11-21T11:31:17,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741826_1002 (size=42) 2024-11-21T11:31:17,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741826_1002 (size=42) 2024-11-21T11:31:17,225 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/.tmp/hbase.id]:[hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/hbase.id] 2024-11-21T11:31:17,236 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:31:17,237 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T11:31:17,238 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
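The util.FSUtils entries above write the cluster ID file to a temporary location and then move it into place. A small sketch of that write-to-.tmp-then-rename pattern with the plain Hadoop FileSystem API; the paths and the ID value here are placeholders, not the ones from this run:

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdPublishSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path tmp = new Path("/hbase/.tmp/hbase.id");  // temporary location
    Path dst = new Path("/hbase/hbase.id");       // final location
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("00000000-0000-0000-0000-000000000000".getBytes(StandardCharsets.UTF_8));
    }
    // Publish the file by renaming it into its final location.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("failed to move " + tmp + " to " + dst);
    }
  }
}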
2024-11-21T11:31:17,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:31:17,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46293-0x1013a4b8e910001, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:31:17,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741827_1003 (size=196) 2024-11-21T11:31:17,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741827_1003 (size=196) 2024-11-21T11:31:17,248 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T11:31:17,249 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T11:31:17,249 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-21T11:31:17,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741828_1004 (size=1189) 2024-11-21T11:31:17,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741828_1004 (size=1189) 2024-11-21T11:31:17,258 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/data/master/store 2024-11-21T11:31:17,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741829_1005 (size=34) 2024-11-21T11:31:17,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741829_1005 (size=34) 2024-11-21T11:31:17,264 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:31:17,264 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T11:31:17,264 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:31:17,264 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:31:17,264 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T11:31:17,264 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:31:17,264 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
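The region.MasterRegion and regionserver.HRegion entries above print the full table descriptor for 'master:store', one attribute block per column family. A rough sketch, using HBase's public descriptor builders, of how a family such as 'info' with those attributes would be declared in code; the attribute values are copied from the log, while building the descriptor client-side like this is only illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static void main(String[] args) {
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8192)                                   // BLOCKSIZE => 8 KB
            .build())
        .build();
    System.out.println(td);
  }
}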
2024-11-21T11:31:17,264 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732188677264Disabling compacts and flushes for region at 1732188677264Disabling writes for close at 1732188677264Writing region close event to WAL at 1732188677264Closed at 1732188677264 2024-11-21T11:31:17,265 WARN [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/data/master/store/.initializing 2024-11-21T11:31:17,265 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/WALs/7b462513bfc2,33889,1732188677135 2024-11-21T11:31:17,267 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b462513bfc2%2C33889%2C1732188677135, suffix=, logDir=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/WALs/7b462513bfc2,33889,1732188677135, archiveDir=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/oldWALs, maxLogs=10 2024-11-21T11:31:17,268 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C33889%2C1732188677135.1732188677268 2024-11-21T11:31:17,272 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/WALs/7b462513bfc2,33889,1732188677135/7b462513bfc2%2C33889%2C1732188677135.1732188677268 2024-11-21T11:31:17,276 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41993:41993),(127.0.0.1/127.0.0.1:41731:41731)] 2024-11-21T11:31:17,276 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T11:31:17,277 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:31:17,277 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:31:17,277 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:31:17,278 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:31:17,279 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T11:31:17,279 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:31:17,280 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:31:17,280 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:31:17,281 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-21T11:31:17,281 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:31:17,282 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:31:17,282 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:31:17,283 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T11:31:17,283 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:31:17,284 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:31:17,284 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:31:17,285 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T11:31:17,285 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:31:17,285 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:31:17,285 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:31:17,286 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:31:17,286 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:31:17,288 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:31:17,288 DEBUG [master/7b462513bfc2:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:31:17,289 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T11:31:17,290 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:31:17,293 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T11:31:17,293 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=729529, jitterRate=-0.07235640287399292}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T11:31:17,294 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732188677277Initializing all the Stores at 1732188677278 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188677278Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188677278Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188677278Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188677278Cleaning up temporary data from old regions at 1732188677288 (+10 ms)Region opened successfully at 1732188677294 (+6 ms) 2024-11-21T11:31:17,294 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T11:31:17,298 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40676ecc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7b462513bfc2/172.17.0.2:0 2024-11-21T11:31:17,299 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-21T11:31:17,299 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T11:31:17,299 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T11:31:17,300 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T11:31:17,303 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 3 msec 2024-11-21T11:31:17,304 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T11:31:17,304 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T11:31:17,306 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-21T11:31:17,307 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-21T11:31:17,309 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-21T11:31:17,310 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T11:31:17,310 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-21T11:31:17,311 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-21T11:31:17,312 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T11:31:17,313 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-21T11:31:17,314 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-21T11:31:17,315 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-21T11:31:17,316 DEBUG 
[master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-21T11:31:17,317 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T11:31:17,320 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-21T11:31:17,321 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-21T11:31:17,321 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46293-0x1013a4b8e910001, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-21T11:31:17,321 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:31:17,321 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46293-0x1013a4b8e910001, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:31:17,322 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7b462513bfc2,33889,1732188677135, sessionid=0x1013a4b8e910000, setting cluster-up flag (Was=false) 2024-11-21T11:31:17,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:31:17,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46293-0x1013a4b8e910001, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:31:17,329 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-21T11:31:17,330 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7b462513bfc2,33889,1732188677135 2024-11-21T11:31:17,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46293-0x1013a4b8e910001, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:31:17,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:31:17,337 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-21T11:31:17,338 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7b462513bfc2,33889,1732188677135 2024-11-21T11:31:17,339 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T11:31:17,341 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T11:31:17,341 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T11:31:17,342 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-21T11:31:17,342 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7b462513bfc2,33889,1732188677135 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T11:31:17,343 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:31:17,343 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:31:17,343 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:31:17,343 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:31:17,343 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7b462513bfc2:0, corePoolSize=10, maxPoolSize=10 2024-11-21T11:31:17,343 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:31:17,343 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7b462513bfc2:0, corePoolSize=2, maxPoolSize=2 2024-11-21T11:31:17,343 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7b462513bfc2:0, corePoolSize=1, 
maxPoolSize=1 2024-11-21T11:31:17,344 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732188707344 2024-11-21T11:31:17,344 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T11:31:17,344 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T11:31:17,344 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T11:31:17,344 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T11:31:17,344 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T11:31:17,344 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T11:31:17,344 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T11:31:17,345 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T11:31:17,345 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T11:31:17,345 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T11:31:17,345 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T11:31:17,345 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T11:31:17,345 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T11:31:17,345 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T11:31:17,345 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.large.0-1732188677345,5,FailOnTimeoutGroup] 2024-11-21T11:31:17,345 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.small.0-1732188677345,5,FailOnTimeoutGroup] 2024-11-21T11:31:17,345 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T11:31:17,346 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T11:31:17,346 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T11:31:17,346 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-21T11:31:17,346 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:31:17,346 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T11:31:17,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741831_1007 (size=1321) 2024-11-21T11:31:17,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741831_1007 (size=1321) 2024-11-21T11:31:17,353 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T11:31:17,354 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2 2024-11-21T11:31:17,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741832_1008 (size=32) 2024-11-21T11:31:17,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741832_1008 (size=32) 2024-11-21T11:31:17,360 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:31:17,361 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T11:31:17,362 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T11:31:17,362 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:31:17,363 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:31:17,363 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T11:31:17,364 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T11:31:17,364 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:31:17,364 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:31:17,364 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T11:31:17,365 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T11:31:17,365 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:31:17,366 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:31:17,366 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T11:31:17,366 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T11:31:17,366 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:31:17,367 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:31:17,367 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T11:31:17,368 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/hbase/meta/1588230740 2024-11-21T11:31:17,368 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/hbase/meta/1588230740 2024-11-21T11:31:17,369 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T11:31:17,369 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T11:31:17,369 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
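The regionserver.FlushLargeStoresPolicy entries (here for hbase:meta, earlier for master:store) fall back to dividing the region's memstore flush heap size by the number of column families when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset. A worked version of that arithmetic for the hbase:meta case, assuming the 64 MB flush size implied by the reported 16.0 M per-family bound:

public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    long memstoreFlushHeapSize = 64L * 1024 * 1024; // assumed: 4 families x 16 MB
    int columnFamilies = 4;                         // info, ns, rep_barrier, table
    long lowerBound = memstoreFlushHeapSize / columnFamilies;
    // Matches FlushLargeStoresPolicy{flushSizeLowerBound=16777216} reported just below.
    System.out.println(lowerBound + " bytes = " + (lowerBound / (1024 * 1024)) + " MB");
  }
}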
2024-11-21T11:31:17,370 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T11:31:17,372 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T11:31:17,372 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=753255, jitterRate=-0.042187631130218506}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-21T11:31:17,373 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732188677360Initializing all the Stores at 1732188677361 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188677361Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188677361Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188677361Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188677361Cleaning up temporary data from old regions at 1732188677369 (+8 ms)Region opened successfully at 1732188677373 (+4 ms) 2024-11-21T11:31:17,373 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T11:31:17,373 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T11:31:17,373 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T11:31:17,373 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T11:31:17,373 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T11:31:17,374 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T11:31:17,374 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732188677373Disabling compacts and flushes for region at 1732188677373Disabling writes for close at 1732188677373Writing region 
close event to WAL at 1732188677374 (+1 ms)Closed at 1732188677374 2024-11-21T11:31:17,375 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T11:31:17,375 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T11:31:17,375 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T11:31:17,377 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T11:31:17,378 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T11:31:17,393 INFO [RS:0;7b462513bfc2:46293 {}] regionserver.HRegionServer(746): ClusterId : 0b1f9a07-9b9c-4dcb-8579-10b1b58e24b6 2024-11-21T11:31:17,393 DEBUG [RS:0;7b462513bfc2:46293 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T11:31:17,396 DEBUG [RS:0;7b462513bfc2:46293 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T11:31:17,396 DEBUG [RS:0;7b462513bfc2:46293 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T11:31:17,397 DEBUG [RS:0;7b462513bfc2:46293 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T11:31:17,398 DEBUG [RS:0;7b462513bfc2:46293 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42fb775a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7b462513bfc2/172.17.0.2:0 2024-11-21T11:31:17,409 DEBUG [RS:0;7b462513bfc2:46293 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7b462513bfc2:46293 2024-11-21T11:31:17,409 INFO [RS:0;7b462513bfc2:46293 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T11:31:17,409 INFO [RS:0;7b462513bfc2:46293 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T11:31:17,409 DEBUG [RS:0;7b462513bfc2:46293 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-21T11:31:17,410 INFO [RS:0;7b462513bfc2:46293 {}] regionserver.HRegionServer(2659): reportForDuty to master=7b462513bfc2,33889,1732188677135 with port=46293, startcode=1732188677179 2024-11-21T11:31:17,410 DEBUG [RS:0;7b462513bfc2:46293 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T11:31:17,412 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60545, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T11:31:17,413 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33889 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7b462513bfc2,46293,1732188677179 2024-11-21T11:31:17,413 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33889 {}] master.ServerManager(517): Registering regionserver=7b462513bfc2,46293,1732188677179 2024-11-21T11:31:17,414 DEBUG [RS:0;7b462513bfc2:46293 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2 2024-11-21T11:31:17,414 DEBUG [RS:0;7b462513bfc2:46293 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43993 2024-11-21T11:31:17,414 DEBUG [RS:0;7b462513bfc2:46293 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T11:31:17,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-21T11:31:17,416 DEBUG [RS:0;7b462513bfc2:46293 {}] zookeeper.ZKUtil(111): regionserver:46293-0x1013a4b8e910001, quorum=127.0.0.1:63013, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7b462513bfc2,46293,1732188677179 2024-11-21T11:31:17,416 WARN [RS:0;7b462513bfc2:46293 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T11:31:17,416 INFO [RS:0;7b462513bfc2:46293 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-21T11:31:17,417 DEBUG [RS:0;7b462513bfc2:46293 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/WALs/7b462513bfc2,46293,1732188677179 2024-11-21T11:31:17,417 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7b462513bfc2,46293,1732188677179] 2024-11-21T11:31:17,420 INFO [RS:0;7b462513bfc2:46293 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T11:31:17,421 INFO [RS:0;7b462513bfc2:46293 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T11:31:17,422 INFO [RS:0;7b462513bfc2:46293 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T11:31:17,422 INFO [RS:0;7b462513bfc2:46293 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-21T11:31:17,422 INFO [RS:0;7b462513bfc2:46293 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T11:31:17,423 INFO [RS:0;7b462513bfc2:46293 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T11:31:17,423 INFO [RS:0;7b462513bfc2:46293 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-21T11:31:17,423 DEBUG [RS:0;7b462513bfc2:46293 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:31:17,423 DEBUG [RS:0;7b462513bfc2:46293 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:31:17,423 DEBUG [RS:0;7b462513bfc2:46293 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:31:17,423 DEBUG [RS:0;7b462513bfc2:46293 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:31:17,423 DEBUG [RS:0;7b462513bfc2:46293 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:31:17,423 DEBUG [RS:0;7b462513bfc2:46293 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7b462513bfc2:0, corePoolSize=2, maxPoolSize=2 2024-11-21T11:31:17,423 DEBUG [RS:0;7b462513bfc2:46293 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:31:17,423 DEBUG [RS:0;7b462513bfc2:46293 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:31:17,423 DEBUG [RS:0;7b462513bfc2:46293 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:31:17,423 DEBUG [RS:0;7b462513bfc2:46293 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:31:17,423 DEBUG [RS:0;7b462513bfc2:46293 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:31:17,423 DEBUG [RS:0;7b462513bfc2:46293 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:31:17,423 DEBUG [RS:0;7b462513bfc2:46293 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7b462513bfc2:0, corePoolSize=3, maxPoolSize=3 2024-11-21T11:31:17,423 DEBUG [RS:0;7b462513bfc2:46293 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0, corePoolSize=3, maxPoolSize=3 2024-11-21T11:31:17,424 INFO [RS:0;7b462513bfc2:46293 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-21T11:31:17,424 INFO [RS:0;7b462513bfc2:46293 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T11:31:17,424 INFO [RS:0;7b462513bfc2:46293 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T11:31:17,424 INFO [RS:0;7b462513bfc2:46293 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-21T11:31:17,424 INFO [RS:0;7b462513bfc2:46293 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T11:31:17,424 INFO [RS:0;7b462513bfc2:46293 {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,46293,1732188677179-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T11:31:17,439 INFO [RS:0;7b462513bfc2:46293 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T11:31:17,439 INFO [RS:0;7b462513bfc2:46293 {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,46293,1732188677179-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T11:31:17,439 INFO [RS:0;7b462513bfc2:46293 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:31:17,439 INFO [RS:0;7b462513bfc2:46293 {}] regionserver.Replication(171): 7b462513bfc2,46293,1732188677179 started 2024-11-21T11:31:17,453 INFO [RS:0;7b462513bfc2:46293 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:31:17,453 INFO [RS:0;7b462513bfc2:46293 {}] regionserver.HRegionServer(1482): Serving as 7b462513bfc2,46293,1732188677179, RpcServer on 7b462513bfc2/172.17.0.2:46293, sessionid=0x1013a4b8e910001 2024-11-21T11:31:17,453 DEBUG [RS:0;7b462513bfc2:46293 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T11:31:17,453 DEBUG [RS:0;7b462513bfc2:46293 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7b462513bfc2,46293,1732188677179 2024-11-21T11:31:17,453 DEBUG [RS:0;7b462513bfc2:46293 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7b462513bfc2,46293,1732188677179' 2024-11-21T11:31:17,453 DEBUG [RS:0;7b462513bfc2:46293 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-21T11:31:17,454 DEBUG [RS:0;7b462513bfc2:46293 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-21T11:31:17,454 DEBUG [RS:0;7b462513bfc2:46293 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T11:31:17,454 DEBUG [RS:0;7b462513bfc2:46293 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T11:31:17,454 DEBUG [RS:0;7b462513bfc2:46293 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7b462513bfc2,46293,1732188677179 2024-11-21T11:31:17,454 DEBUG [RS:0;7b462513bfc2:46293 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7b462513bfc2,46293,1732188677179' 2024-11-21T11:31:17,454 DEBUG [RS:0;7b462513bfc2:46293 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-21T11:31:17,455 DEBUG 
[RS:0;7b462513bfc2:46293 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-21T11:31:17,455 DEBUG [RS:0;7b462513bfc2:46293 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T11:31:17,455 INFO [RS:0;7b462513bfc2:46293 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T11:31:17,455 INFO [RS:0;7b462513bfc2:46293 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T11:31:17,528 WARN [7b462513bfc2:33889 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-21T11:31:17,557 INFO [RS:0;7b462513bfc2:46293 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b462513bfc2%2C46293%2C1732188677179, suffix=, logDir=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/WALs/7b462513bfc2,46293,1732188677179, archiveDir=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/oldWALs, maxLogs=32 2024-11-21T11:31:17,558 INFO [RS:0;7b462513bfc2:46293 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C46293%2C1732188677179.1732188677557 2024-11-21T11:31:17,563 INFO [RS:0;7b462513bfc2:46293 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/WALs/7b462513bfc2,46293,1732188677179/7b462513bfc2%2C46293%2C1732188677179.1732188677557 2024-11-21T11:31:17,568 DEBUG [RS:0;7b462513bfc2:46293 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41993:41993),(127.0.0.1/127.0.0.1:41731:41731)] 2024-11-21T11:31:17,778 DEBUG [7b462513bfc2:33889 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T11:31:17,779 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7b462513bfc2,46293,1732188677179 2024-11-21T11:31:17,780 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7b462513bfc2,46293,1732188677179, state=OPENING 2024-11-21T11:31:17,782 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T11:31:17,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46293-0x1013a4b8e910001, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:31:17,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:31:17,784 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T11:31:17,784 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7b462513bfc2,46293,1732188677179}] 2024-11-21T11:31:17,784 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for 
path /hbase/meta-region-server: CHANGED 2024-11-21T11:31:17,784 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:31:17,938 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T11:31:17,940 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38189, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T11:31:17,943 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T11:31:17,943 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-21T11:31:17,945 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b462513bfc2%2C46293%2C1732188677179.meta, suffix=.meta, logDir=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/WALs/7b462513bfc2,46293,1732188677179, archiveDir=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/oldWALs, maxLogs=32 2024-11-21T11:31:17,946 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C46293%2C1732188677179.meta.1732188677946.meta 2024-11-21T11:31:17,951 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/WALs/7b462513bfc2,46293,1732188677179/7b462513bfc2%2C46293%2C1732188677179.meta.1732188677946.meta 2024-11-21T11:31:17,955 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41993:41993),(127.0.0.1/127.0.0.1:41731:41731)] 2024-11-21T11:31:17,957 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T11:31:17,957 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T11:31:17,957 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T11:31:17,957 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-21T11:31:17,957 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T11:31:17,957 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:31:17,957 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T11:31:17,957 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T11:31:17,959 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T11:31:17,960 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T11:31:17,960 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:31:17,960 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:31:17,961 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T11:31:17,961 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T11:31:17,961 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:31:17,961 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:31:17,962 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T11:31:17,962 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T11:31:17,962 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:31:17,962 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:31:17,962 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T11:31:17,963 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T11:31:17,963 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:31:17,963 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-21T11:31:17,963 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T11:31:17,964 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/hbase/meta/1588230740 2024-11-21T11:31:17,965 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/hbase/meta/1588230740 2024-11-21T11:31:17,966 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T11:31:17,966 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T11:31:17,966 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-21T11:31:17,967 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T11:31:17,968 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=849420, jitterRate=0.08009426295757294}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-21T11:31:17,968 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T11:31:17,969 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732188677958Writing region info on filesystem at 1732188677958Initializing all the Stores at 1732188677958Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188677958Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188677959 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188677959Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188677959Cleaning up temporary data from old regions at 1732188677966 (+7 ms)Running coprocessor post-open hooks at 1732188677968 (+2 ms)Region opened successfully at 1732188677969 (+1 ms) 2024-11-21T11:31:17,970 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732188677937 2024-11-21T11:31:17,972 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T11:31:17,973 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T11:31:17,973 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7b462513bfc2,46293,1732188677179 2024-11-21T11:31:17,974 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7b462513bfc2,46293,1732188677179, state=OPEN 2024-11-21T11:31:17,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46293-0x1013a4b8e910001, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-21T11:31:17,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-21T11:31:17,978 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7b462513bfc2,46293,1732188677179 2024-11-21T11:31:17,978 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:31:17,978 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:31:17,981 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T11:31:17,981 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7b462513bfc2,46293,1732188677179 in 194 msec 2024-11-21T11:31:17,984 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T11:31:17,984 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 606 msec 2024-11-21T11:31:17,985 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T11:31:17,985 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T11:31:17,986 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T11:31:17,986 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7b462513bfc2,46293,1732188677179, seqNum=-1] 2024-11-21T11:31:17,987 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T11:31:17,988 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44075, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T11:31:17,994 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 652 msec 2024-11-21T11:31:17,994 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732188677994, completionTime=-1 2024-11-21T11:31:17,994 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T11:31:17,994 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-21T11:31:17,995 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-21T11:31:17,995 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732188737995 2024-11-21T11:31:17,996 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732188797996 2024-11-21T11:31:17,996 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-21T11:31:17,996 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,33889,1732188677135-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T11:31:17,996 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,33889,1732188677135-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:31:17,996 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,33889,1732188677135-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:31:17,996 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7b462513bfc2:33889, period=300000, unit=MILLISECONDS is enabled. 
2024-11-21T11:31:17,996 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T11:31:17,996 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T11:31:17,998 DEBUG [master/7b462513bfc2:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T11:31:18,000 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.786sec 2024-11-21T11:31:18,000 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T11:31:18,000 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T11:31:18,000 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T11:31:18,000 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-21T11:31:18,000 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T11:31:18,000 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,33889,1732188677135-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T11:31:18,000 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,33889,1732188677135-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T11:31:18,002 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T11:31:18,002 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T11:31:18,002 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,33889,1732188677135-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:31:18,053 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:18,059 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:18,093 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38d3f6f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T11:31:18,093 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7b462513bfc2,33889,-1 for getting cluster id 2024-11-21T11:31:18,093 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T11:31:18,095 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0b1f9a07-9b9c-4dcb-8579-10b1b58e24b6' 2024-11-21T11:31:18,096 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T11:31:18,096 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0b1f9a07-9b9c-4dcb-8579-10b1b58e24b6" 2024-11-21T11:31:18,096 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79c565cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T11:31:18,096 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7b462513bfc2,33889,-1] 2024-11-21T11:31:18,096 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T11:31:18,097 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:31:18,098 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53314, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T11:31:18,099 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@448bc78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T11:31:18,099 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T11:31:18,100 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7b462513bfc2,46293,1732188677179, seqNum=-1] 2024-11-21T11:31:18,100 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T11:31:18,101 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53524, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T11:31:18,103 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7b462513bfc2,33889,1732188677135 2024-11-21T11:31:18,103 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:31:18,105 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-21T11:31:18,106 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T11:31:18,106 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 7b462513bfc2,33889,1732188677135 2024-11-21T11:31:18,107 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4ef81dc9 2024-11-21T11:31:18,107 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T11:31:18,108 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53324, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T11:31:18,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-21T11:31:18,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-21T11:31:18,108 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T11:31:18,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-21T11:31:18,111 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T11:31:18,111 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:31:18,112 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-21T11:31:18,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T11:31:18,113 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T11:31:18,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741835_1011 (size=405) 2024-11-21T11:31:18,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741835_1011 (size=405) 2024-11-21T11:31:18,121 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4ca0a58892a20d8cbed52a750cb9110e, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2 2024-11-21T11:31:18,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741836_1012 (size=88) 2024-11-21T11:31:18,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42157 is added to blk_1073741836_1012 (size=88) 2024-11-21T11:31:18,128 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:31:18,128 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 4ca0a58892a20d8cbed52a750cb9110e, disabling compactions & flushes 2024-11-21T11:31:18,128 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e. 2024-11-21T11:31:18,128 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e. 2024-11-21T11:31:18,128 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e. after waiting 0 ms 2024-11-21T11:31:18,128 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e. 2024-11-21T11:31:18,128 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e. 2024-11-21T11:31:18,128 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4ca0a58892a20d8cbed52a750cb9110e: Waiting for close lock at 1732188678128Disabling compacts and flushes for region at 1732188678128Disabling writes for close at 1732188678128Writing region close event to WAL at 1732188678128Closed at 1732188678128 2024-11-21T11:31:18,130 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T11:31:18,130 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1732188678130"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732188678130"}]},"ts":"1732188678130"} 2024-11-21T11:31:18,133 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-21T11:31:18,134 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T11:31:18,134 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732188678134"}]},"ts":"1732188678134"} 2024-11-21T11:31:18,136 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-21T11:31:18,136 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=4ca0a58892a20d8cbed52a750cb9110e, ASSIGN}] 2024-11-21T11:31:18,137 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=4ca0a58892a20d8cbed52a750cb9110e, ASSIGN 2024-11-21T11:31:18,139 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=4ca0a58892a20d8cbed52a750cb9110e, ASSIGN; state=OFFLINE, location=7b462513bfc2,46293,1732188677179; forceNewPlan=false, retain=false 2024-11-21T11:31:18,289 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4ca0a58892a20d8cbed52a750cb9110e, regionState=OPENING, regionLocation=7b462513bfc2,46293,1732188677179 2024-11-21T11:31:18,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=4ca0a58892a20d8cbed52a750cb9110e, ASSIGN because future has completed 2024-11-21T11:31:18,292 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4ca0a58892a20d8cbed52a750cb9110e, server=7b462513bfc2,46293,1732188677179}] 2024-11-21T11:31:18,293 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-21T11:31:18,293 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-21T11:31:18,294 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T11:31:18,294 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 
2024-11-21T11:31:18,449 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e. 2024-11-21T11:31:18,450 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 4ca0a58892a20d8cbed52a750cb9110e, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e.', STARTKEY => '', ENDKEY => ''} 2024-11-21T11:31:18,450 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 4ca0a58892a20d8cbed52a750cb9110e 2024-11-21T11:31:18,450 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:31:18,450 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 4ca0a58892a20d8cbed52a750cb9110e 2024-11-21T11:31:18,450 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 4ca0a58892a20d8cbed52a750cb9110e 2024-11-21T11:31:18,452 INFO [StoreOpener-4ca0a58892a20d8cbed52a750cb9110e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4ca0a58892a20d8cbed52a750cb9110e 2024-11-21T11:31:18,453 INFO [StoreOpener-4ca0a58892a20d8cbed52a750cb9110e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4ca0a58892a20d8cbed52a750cb9110e columnFamilyName info 2024-11-21T11:31:18,453 DEBUG [StoreOpener-4ca0a58892a20d8cbed52a750cb9110e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:31:18,454 INFO [StoreOpener-4ca0a58892a20d8cbed52a750cb9110e-1 {}] regionserver.HStore(327): Store=4ca0a58892a20d8cbed52a750cb9110e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:31:18,454 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 
4ca0a58892a20d8cbed52a750cb9110e 2024-11-21T11:31:18,455 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e 2024-11-21T11:31:18,455 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e 2024-11-21T11:31:18,455 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 4ca0a58892a20d8cbed52a750cb9110e 2024-11-21T11:31:18,455 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 4ca0a58892a20d8cbed52a750cb9110e 2024-11-21T11:31:18,457 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 4ca0a58892a20d8cbed52a750cb9110e 2024-11-21T11:31:18,459 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T11:31:18,460 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 4ca0a58892a20d8cbed52a750cb9110e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=849373, jitterRate=0.08003365993499756}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T11:31:18,460 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4ca0a58892a20d8cbed52a750cb9110e 2024-11-21T11:31:18,461 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 4ca0a58892a20d8cbed52a750cb9110e: Running coprocessor pre-open hook at 1732188678450Writing region info on filesystem at 1732188678450Initializing all the Stores at 1732188678451 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188678451Cleaning up temporary data from old regions at 1732188678455 (+4 ms)Running coprocessor post-open hooks at 1732188678460 (+5 ms)Region opened successfully at 1732188678461 (+1 ms) 2024-11-21T11:31:18,462 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for 
TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e., pid=6, masterSystemTime=1732188678445 2024-11-21T11:31:18,464 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e. 2024-11-21T11:31:18,464 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e. 2024-11-21T11:31:18,466 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4ca0a58892a20d8cbed52a750cb9110e, regionState=OPEN, openSeqNum=2, regionLocation=7b462513bfc2,46293,1732188677179 2024-11-21T11:31:18,468 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4ca0a58892a20d8cbed52a750cb9110e, server=7b462513bfc2,46293,1732188677179 because future has completed 2024-11-21T11:31:18,472 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T11:31:18,472 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 4ca0a58892a20d8cbed52a750cb9110e, server=7b462513bfc2,46293,1732188677179 in 177 msec 2024-11-21T11:31:18,474 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-21T11:31:18,475 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=4ca0a58892a20d8cbed52a750cb9110e, ASSIGN in 336 msec 2024-11-21T11:31:18,475 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T11:31:18,476 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732188678476"}]},"ts":"1732188678476"} 2024-11-21T11:31:18,478 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-21T11:31:18,479 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T11:31:18,482 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 372 msec 2024-11-21T11:31:19,054 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: 
null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:19,059 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:20,054 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:20,060 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:21,055 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:21,060 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:22,055 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:22,061 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:23,056 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:23,061 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:23,459 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T11:31:23,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:23,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:23,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:23,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:23,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:23,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:23,475 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:23,476 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:23,476 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:23,476 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:23,476 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:23,477 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:23,479 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:23,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:23,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:23,482 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:31:23,486 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-21T11:31:23,487 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-21T11:31:24,056 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:24,061 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:25,057 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:25,062 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:26,057 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:26,062 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:27,058 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:27,063 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:28,059 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:28,063 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:28,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T11:31:28,179 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-21T11:31:28,179 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-21T11:31:28,182 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-21T11:31:28,182 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e. 2024-11-21T11:31:28,185 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e., hostname=7b462513bfc2,46293,1732188677179, seqNum=2] 2024-11-21T11:31:28,192 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-21T11:31:28,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-21T11:31:28,198 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-21T11:31:28,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T11:31:28,199 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T11:31:28,201 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T11:31:28,293 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): 
Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-21T11:31:28,293 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-21T11:31:28,361 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46293 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-21T11:31:28,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e. 2024-11-21T11:31:28,362 INFO [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 4ca0a58892a20d8cbed52a750cb9110e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-21T11:31:28,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/.tmp/info/ab493bcc61534bb190363be51e7626c4 is 1080, key is row0001/info:/1732188688187/Put/seqid=0 2024-11-21T11:31:28,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741837_1013 (size=6033) 2024-11-21T11:31:28,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741837_1013 (size=6033) 2024-11-21T11:31:28,383 INFO [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/.tmp/info/ab493bcc61534bb190363be51e7626c4 2024-11-21T11:31:28,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/.tmp/info/ab493bcc61534bb190363be51e7626c4 as hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/info/ab493bcc61534bb190363be51e7626c4 2024-11-21T11:31:28,395 INFO [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/info/ab493bcc61534bb190363be51e7626c4, entries=1, sequenceid=5, filesize=5.9 K 2024-11-21T11:31:28,396 INFO 
[RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 4ca0a58892a20d8cbed52a750cb9110e in 35ms, sequenceid=5, compaction requested=false 2024-11-21T11:31:28,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 4ca0a58892a20d8cbed52a750cb9110e: 2024-11-21T11:31:28,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e. 2024-11-21T11:31:28,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-21T11:31:28,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-21T11:31:28,404 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-21T11:31:28,404 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 201 msec 2024-11-21T11:31:28,407 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 211 msec 2024-11-21T11:31:29,059 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:29,064 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:30,060 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:30,064 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:31,060 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:31,065 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:32,061 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:32,065 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:33,061 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:33,065 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:34,062 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:34,066 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:35,062 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:35,066 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:36,063 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:36,067 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:37,063 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:37,067 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:38,064 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:38,068 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:31:38,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T11:31:38,229 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-21T11:31:38,232 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-21T11:31:38,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-21T11:31:38,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-21T11:31:38,235 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-21T11:31:38,236 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T11:31:38,236 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T11:31:38,389 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46293 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-21T11:31:38,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e. 
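Annotation. The once-per-second WARNs from Close-WAL-Writer-0 above all follow the same pattern: RecoverLeaseFSUtils reflectively probes DistributedFileSystem.isFileClosed while waiting for the old WAL's lease to be recovered, but the DFSClient behind that FileSystem handle has already been shut down, so every probe fails with "Filesystem closed" and is surfaced as an InvocationTargetException. The sketch below is a minimal, hypothetical reproduction of that failure mode only; the NameNode URI and path are placeholders, not values from this run.

    // Minimal sketch (not from this test run): once the FileSystem handle is closed,
    // any further isFileClosed() probe throws "Filesystem closed", which is the cause
    // RecoverLeaseFSUtils keeps wrapping in InvocationTargetException in the WARNs above.
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class IsFileClosedAfterShutdown {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical NameNode address; a real run would use the cluster's fs.defaultFS.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
        Path wal = new Path("/hbase/WALs/example-wal");  // placeholder WAL path
        fs.close();  // simulates the already shut-down DFSClient seen in this log
        if (fs instanceof DistributedFileSystem) {
          // Throws java.io.IOException: Filesystem closed, as in the stack traces above.
          ((DistributedFileSystem) fs).isFileClosed(wal);
        }
      }
    }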
2024-11-21T11:31:38,390 INFO [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 4ca0a58892a20d8cbed52a750cb9110e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-21T11:31:38,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/.tmp/info/1e8b047d6bf5465aaa85da05cf62c70c is 1080, key is row0002/info:/1732188698230/Put/seqid=0 2024-11-21T11:31:38,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741838_1014 (size=6033) 2024-11-21T11:31:38,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741838_1014 (size=6033) 2024-11-21T11:31:38,400 INFO [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/.tmp/info/1e8b047d6bf5465aaa85da05cf62c70c 2024-11-21T11:31:38,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/.tmp/info/1e8b047d6bf5465aaa85da05cf62c70c as hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/info/1e8b047d6bf5465aaa85da05cf62c70c 2024-11-21T11:31:38,410 INFO [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/info/1e8b047d6bf5465aaa85da05cf62c70c, entries=1, sequenceid=9, filesize=5.9 K 2024-11-21T11:31:38,411 INFO [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 4ca0a58892a20d8cbed52a750cb9110e in 21ms, sequenceid=9, compaction requested=false 2024-11-21T11:31:38,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 4ca0a58892a20d8cbed52a750cb9110e: 2024-11-21T11:31:38,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e. 
2024-11-21T11:31:38,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-21T11:31:38,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-21T11:31:38,416 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-21T11:31:38,416 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec 2024-11-21T11:31:38,418 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 184 msec 2024-11-21T11:31:39,065 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:31:39,068 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:40,065 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:40,068 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:31:41,066 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:41,069 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:42,066 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:31:42,069 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:43,067 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:43,070 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:31:44,067 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:44,068 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 after 68045ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor205.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:31:44,070 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:31:44,070 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta after 68031ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor205.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T11:31:45,068 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:45,071 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:31:46,069 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:46,071 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:47,069 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:31:47,072 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:47,119 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T11:31:48,070 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:48,072 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:48,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-21T11:31:48,289 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-21T11:31:48,291 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C46293%2C1732188677179.1732188708291 2024-11-21T11:31:48,296 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:48,297 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:48,297 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:48,297 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:48,297 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:31:48,297 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/WALs/7b462513bfc2,46293,1732188677179/7b462513bfc2%2C46293%2C1732188677179.1732188677557 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/WALs/7b462513bfc2,46293,1732188677179/7b462513bfc2%2C46293%2C1732188677179.1732188708291 2024-11-21T11:31:48,298 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41993:41993),(127.0.0.1/127.0.0.1:41731:41731)] 2024-11-21T11:31:48,298 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/WALs/7b462513bfc2,46293,1732188677179/7b462513bfc2%2C46293%2C1732188677179.1732188677557 is not closed yet, will try archiving it next time 2024-11-21T11:31:48,299 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-21T11:31:48,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741833_1009 (size=5546) 2024-11-21T11:31:48,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741833_1009 (size=5546) 2024-11-21T11:31:48,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-21T11:31:48,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-21T11:31:48,301 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-21T11:31:48,302 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-21T11:31:48,302 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-21T11:31:48,455 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46293 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-21T11:31:48,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e. 2024-11-21T11:31:48,456 INFO [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 4ca0a58892a20d8cbed52a750cb9110e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-21T11:31:48,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/.tmp/info/e413daeb1da14200b70c1f9865cd1032 is 1080, key is row0003/info:/1732188708290/Put/seqid=0 2024-11-21T11:31:48,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741840_1016 (size=6033) 2024-11-21T11:31:48,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741840_1016 (size=6033) 2024-11-21T11:31:48,465 INFO [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/.tmp/info/e413daeb1da14200b70c1f9865cd1032 2024-11-21T11:31:48,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/.tmp/info/e413daeb1da14200b70c1f9865cd1032 as hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/info/e413daeb1da14200b70c1f9865cd1032 2024-11-21T11:31:48,475 INFO [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/info/e413daeb1da14200b70c1f9865cd1032, entries=1, sequenceid=13, filesize=5.9 K 2024-11-21T11:31:48,476 INFO [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 4ca0a58892a20d8cbed52a750cb9110e in 21ms, sequenceid=13, compaction requested=true 2024-11-21T11:31:48,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 4ca0a58892a20d8cbed52a750cb9110e: 2024-11-21T11:31:48,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e. 2024-11-21T11:31:48,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-21T11:31:48,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-21T11:31:48,480 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-21T11:31:48,480 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 176 msec 2024-11-21T11:31:48,482 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 182 msec 2024-11-21T11:31:49,070 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:49,072 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:50,071 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:50,073 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:51,071 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:51,073 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:52,072 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:52,074 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:53,073 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:53,074 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:54,073 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:54,074 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:55,074 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:55,075 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:56,074 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:56,075 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:57,075 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:57,076 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:58,003 INFO [master/7b462513bfc2:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-21T11:31:58,003 INFO [master/7b462513bfc2:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-21T11:31:58,075 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:31:58,076 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-21T11:31:58,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-21T11:31:58,349 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-21T11:31:58,349 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-21T11:31:58,351 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-21T11:31:58,351 DEBUG [Time-limited test {}] regionserver.HStore(1541): 4ca0a58892a20d8cbed52a750cb9110e/info is initiating minor compaction (all files)
2024-11-21T11:31:58,351 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-21T11:31:58,351 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-21T11:31:58,351 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 4ca0a58892a20d8cbed52a750cb9110e/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e.
2024-11-21T11:31:58,351 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/info/ab493bcc61534bb190363be51e7626c4, hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/info/1e8b047d6bf5465aaa85da05cf62c70c, hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/info/e413daeb1da14200b70c1f9865cd1032] into tmpdir=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/.tmp, totalSize=17.7 K
2024-11-21T11:31:58,352 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting ab493bcc61534bb190363be51e7626c4, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732188688187
2024-11-21T11:31:58,352 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 1e8b047d6bf5465aaa85da05cf62c70c, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1732188698230
2024-11-21T11:31:58,353 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting e413daeb1da14200b70c1f9865cd1032, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732188708290
2024-11-21T11:31:58,363 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 4ca0a58892a20d8cbed52a750cb9110e#info#compaction#44 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-21T11:31:58,363 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/.tmp/info/b55f1ec6227c4c3a9c845e292e87d517 is 1080, key is row0001/info:/1732188688187/Put/seqid=0
2024-11-21T11:31:58,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741841_1017 (size=8296)
2024-11-21T11:31:58,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741841_1017 (size=8296)
2024-11-21T11:31:58,375 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/.tmp/info/b55f1ec6227c4c3a9c845e292e87d517 as hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/info/b55f1ec6227c4c3a9c845e292e87d517
2024-11-21T11:31:58,382 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4ca0a58892a20d8cbed52a750cb9110e/info of 4ca0a58892a20d8cbed52a750cb9110e into b55f1ec6227c4c3a9c845e292e87d517(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-21T11:31:58,382 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 4ca0a58892a20d8cbed52a750cb9110e:
2024-11-21T11:31:58,385 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C46293%2C1732188677179.1732188718385
2024-11-21T11:31:58,391 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-21T11:31:58,391 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-21T11:31:58,391 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-21T11:31:58,391 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-21T11:31:58,392 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-21T11:31:58,392 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/WALs/7b462513bfc2,46293,1732188677179/7b462513bfc2%2C46293%2C1732188677179.1732188708291 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/WALs/7b462513bfc2,46293,1732188677179/7b462513bfc2%2C46293%2C1732188677179.1732188718385
2024-11-21T11:31:58,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741839_1015 (size=2520)
2024-11-21T11:31:58,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741839_1015 (size=2520)
2024-11-21T11:31:58,397 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/WALs/7b462513bfc2,46293,1732188677179/7b462513bfc2%2C46293%2C1732188677179.1732188677557 to hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/oldWALs/7b462513bfc2%2C46293%2C1732188677179.1732188677557
2024-11-21T11:31:58,397 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41993:41993),(127.0.0.1/127.0.0.1:41731:41731)]
2024-11-21T11:31:58,398 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-21T11:31:58,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-21T11:31:58,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-21T11:31:58,400 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-21T11:31:58,402 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-21T11:31:58,402 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-21T11:31:58,555 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46293 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-21T11:31:58,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e.
2024-11-21T11:31:58,555 INFO [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 4ca0a58892a20d8cbed52a750cb9110e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-21T11:31:58,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/.tmp/info/866e505820bd4f5c9370fe8ef5301e9e is 1080, key is row0000/info:/1732188718383/Put/seqid=0
2024-11-21T11:31:58,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741843_1019 (size=6033)
2024-11-21T11:31:58,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741843_1019 (size=6033)
2024-11-21T11:31:58,565 INFO [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/.tmp/info/866e505820bd4f5c9370fe8ef5301e9e
2024-11-21T11:31:58,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/.tmp/info/866e505820bd4f5c9370fe8ef5301e9e as hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/info/866e505820bd4f5c9370fe8ef5301e9e
2024-11-21T11:31:58,576 INFO [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/info/866e505820bd4f5c9370fe8ef5301e9e, entries=1, sequenceid=18, filesize=5.9 K
2024-11-21T11:31:58,577 INFO [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 4ca0a58892a20d8cbed52a750cb9110e in 22ms, sequenceid=18, compaction requested=false
2024-11-21T11:31:58,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 4ca0a58892a20d8cbed52a750cb9110e:
2024-11-21T11:31:58,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e.
2024-11-21T11:31:58,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-21T11:31:58,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-21T11:31:58,582 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-21T11:31:58,582 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 178 msec 2024-11-21T11:31:58,585 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 185 msec 2024-11-21T11:31:59,076 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:31:59,076 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:00,076 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:00,077 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:32:01,077 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:01,077 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:02,077 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:32:02,078 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:03,078 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:03,078 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:32:03,450 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 4ca0a58892a20d8cbed52a750cb9110e, had cached 0 bytes from a total of 14329 2024-11-21T11:32:04,079 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:04,079 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:05,080 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:05,080 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:06,080 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:06,080 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:07,081 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:07,081 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:08,082 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:08,082 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-21T11:32:08,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33889 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-21T11:32:08,409 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-21T11:32:08,412 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C46293%2C1732188677179.1732188728412
2024-11-21T11:32:08,418 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-21T11:32:08,418 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-21T11:32:08,419 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-21T11:32:08,419 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-21T11:32:08,419 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-21T11:32:08,419 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/WALs/7b462513bfc2,46293,1732188677179/7b462513bfc2%2C46293%2C1732188677179.1732188718385 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/WALs/7b462513bfc2,46293,1732188677179/7b462513bfc2%2C46293%2C1732188677179.1732188728412
2024-11-21T11:32:08,420 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41731:41731),(127.0.0.1/127.0.0.1:41993:41993)]
2024-11-21T11:32:08,420 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/WALs/7b462513bfc2,46293,1732188677179/7b462513bfc2%2C46293%2C1732188677179.1732188718385 is not closed yet, will try archiving it next time
2024-11-21T11:32:08,420 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-21T11:32:08,420 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/WALs/7b462513bfc2,46293,1732188677179/7b462513bfc2%2C46293%2C1732188677179.1732188708291 to hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/oldWALs/7b462513bfc2%2C46293%2C1732188677179.1732188708291
2024-11-21T11:32:08,420 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-21T11:32:08,420 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-21T11:32:08,420 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-21T11:32:08,420 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-21T11:32:08,420 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-21T11:32:08,420 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-11-21T11:32:08,421 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=48120627, stopped=false
2024-11-21T11:32:08,421 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7b462513bfc2,33889,1732188677135
2024-11-21T11:32:08,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741842_1018 (size=2026)
2024-11-21T11:32:08,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741842_1018 (size=2026)
2024-11-21T11:32:08,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-21T11:32:08,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46293-0x1013a4b8e910001, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-21T11:32:08,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-21T11:32:08,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46293-0x1013a4b8e910001, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-21T11:32:08,425 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-21T11:32:08,425 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-21T11:32:08,425 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-21T11:32:08,425 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-21T11:32:08,425 INFO [Time-limited test {}]
regionserver.HRegionServer(2196): ***** STOPPING region server '7b462513bfc2,46293,1732188677179' ***** 2024-11-21T11:32:08,425 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:32:08,425 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T11:32:08,425 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46293-0x1013a4b8e910001, quorum=127.0.0.1:63013, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:32:08,425 INFO [RS:0;7b462513bfc2:46293 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T11:32:08,426 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T11:32:08,426 INFO [RS:0;7b462513bfc2:46293 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T11:32:08,426 INFO [RS:0;7b462513bfc2:46293 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T11:32:08,426 INFO [RS:0;7b462513bfc2:46293 {}] regionserver.HRegionServer(3091): Received CLOSE for 4ca0a58892a20d8cbed52a750cb9110e 2024-11-21T11:32:08,426 INFO [RS:0;7b462513bfc2:46293 {}] regionserver.HRegionServer(959): stopping server 7b462513bfc2,46293,1732188677179 2024-11-21T11:32:08,426 INFO [RS:0;7b462513bfc2:46293 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T11:32:08,426 INFO [RS:0;7b462513bfc2:46293 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7b462513bfc2:46293. 2024-11-21T11:32:08,426 DEBUG [RS:0;7b462513bfc2:46293 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T11:32:08,426 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4ca0a58892a20d8cbed52a750cb9110e, disabling compactions & flushes 2024-11-21T11:32:08,426 DEBUG [RS:0;7b462513bfc2:46293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:32:08,426 INFO 
[RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e. 2024-11-21T11:32:08,426 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e. 2024-11-21T11:32:08,427 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e. after waiting 0 ms 2024-11-21T11:32:08,427 INFO [RS:0;7b462513bfc2:46293 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T11:32:08,427 INFO [RS:0;7b462513bfc2:46293 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T11:32:08,427 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e. 2024-11-21T11:32:08,427 INFO [RS:0;7b462513bfc2:46293 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-21T11:32:08,427 INFO [RS:0;7b462513bfc2:46293 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T11:32:08,427 INFO [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 4ca0a58892a20d8cbed52a750cb9110e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-21T11:32:08,427 INFO [RS:0;7b462513bfc2:46293 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-21T11:32:08,427 DEBUG [RS:0;7b462513bfc2:46293 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 4ca0a58892a20d8cbed52a750cb9110e=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e.} 2024-11-21T11:32:08,427 DEBUG [RS:0;7b462513bfc2:46293 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 4ca0a58892a20d8cbed52a750cb9110e 2024-11-21T11:32:08,427 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T11:32:08,427 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T11:32:08,427 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T11:32:08,427 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T11:32:08,427 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T11:32:08,427 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-21T11:32:08,431 DEBUG 
[RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/.tmp/info/917de231efca435ca56ad0204ec64ff8 is 1080, key is row0001/info:/1732188728411/Put/seqid=0 2024-11-21T11:32:08,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741845_1021 (size=6033) 2024-11-21T11:32:08,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741845_1021 (size=6033) 2024-11-21T11:32:08,437 INFO [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/.tmp/info/917de231efca435ca56ad0204ec64ff8 2024-11-21T11:32:08,443 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/.tmp/info/917de231efca435ca56ad0204ec64ff8 as hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/info/917de231efca435ca56ad0204ec64ff8 2024-11-21T11:32:08,444 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/hbase/meta/1588230740/.tmp/info/d8751b43fe3e48428bece7d9d1264896 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e./info:regioninfo/1732188678465/Put/seqid=0 2024-11-21T11:32:08,450 INFO [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/info/917de231efca435ca56ad0204ec64ff8, entries=1, sequenceid=22, filesize=5.9 K 2024-11-21T11:32:08,451 INFO [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 4ca0a58892a20d8cbed52a750cb9110e in 24ms, sequenceid=22, compaction requested=true 2024-11-21T11:32:08,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741846_1022 (size=7308) 2024-11-21T11:32:08,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741846_1022 (size=7308) 2024-11-21T11:32:08,452 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e.-1 {}] regionserver.HStore(2317): 
Moving the files [hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/info/ab493bcc61534bb190363be51e7626c4, hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/info/1e8b047d6bf5465aaa85da05cf62c70c, hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/info/e413daeb1da14200b70c1f9865cd1032] to archive 2024-11-21T11:32:08,452 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/hbase/meta/1588230740/.tmp/info/d8751b43fe3e48428bece7d9d1264896 2024-11-21T11:32:08,453 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-21T11:32:08,454 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/info/ab493bcc61534bb190363be51e7626c4 to hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/info/ab493bcc61534bb190363be51e7626c4 2024-11-21T11:32:08,455 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/info/1e8b047d6bf5465aaa85da05cf62c70c to hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/info/1e8b047d6bf5465aaa85da05cf62c70c 2024-11-21T11:32:08,457 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/info/e413daeb1da14200b70c1f9865cd1032 to hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/info/e413daeb1da14200b70c1f9865cd1032 2024-11-21T11:32:08,457 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=7b462513bfc2:33889 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-21T11:32:08,457 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [ab493bcc61534bb190363be51e7626c4=6033, 1e8b047d6bf5465aaa85da05cf62c70c=6033, e413daeb1da14200b70c1f9865cd1032=6033] 2024-11-21T11:32:08,461 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/4ca0a58892a20d8cbed52a750cb9110e/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-21T11:32:08,462 INFO [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e. 2024-11-21T11:32:08,462 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4ca0a58892a20d8cbed52a750cb9110e: Waiting for close lock at 1732188728426Running coprocessor pre-close hooks at 1732188728426Disabling compacts and flushes for region at 1732188728426Disabling writes for close at 1732188728427 (+1 ms)Obtaining lock to block concurrent updates at 1732188728427Preparing flush snapshotting stores in 4ca0a58892a20d8cbed52a750cb9110e at 1732188728427Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732188728427Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e. at 1732188728428 (+1 ms)Flushing 4ca0a58892a20d8cbed52a750cb9110e/info: creating writer at 1732188728428Flushing 4ca0a58892a20d8cbed52a750cb9110e/info: appending metadata at 1732188728431 (+3 ms)Flushing 4ca0a58892a20d8cbed52a750cb9110e/info: closing flushed file at 1732188728431Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@31586ceb: reopening flushed file at 1732188728442 (+11 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 4ca0a58892a20d8cbed52a750cb9110e in 24ms, sequenceid=22, compaction requested=true at 1732188728451 (+9 ms)Writing region close event to WAL at 1732188728458 (+7 ms)Running coprocessor post-close hooks at 1732188728462 (+4 ms)Closed at 1732188728462 2024-11-21T11:32:08,462 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732188678108.4ca0a58892a20d8cbed52a750cb9110e. 
2024-11-21T11:32:08,472 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/hbase/meta/1588230740/.tmp/ns/c7584871af9a4da6b29e77223f3f1dac is 43, key is default/ns:d/1732188677989/Put/seqid=0 2024-11-21T11:32:08,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741847_1023 (size=5153) 2024-11-21T11:32:08,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741847_1023 (size=5153) 2024-11-21T11:32:08,477 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/hbase/meta/1588230740/.tmp/ns/c7584871af9a4da6b29e77223f3f1dac 2024-11-21T11:32:08,495 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/hbase/meta/1588230740/.tmp/table/58d19ac35e8a439c9ab6cc27a3f6b649 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1732188678476/Put/seqid=0 2024-11-21T11:32:08,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741848_1024 (size=5508) 2024-11-21T11:32:08,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741848_1024 (size=5508) 2024-11-21T11:32:08,500 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/hbase/meta/1588230740/.tmp/table/58d19ac35e8a439c9ab6cc27a3f6b649 2024-11-21T11:32:08,505 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/hbase/meta/1588230740/.tmp/info/d8751b43fe3e48428bece7d9d1264896 as hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/hbase/meta/1588230740/info/d8751b43fe3e48428bece7d9d1264896 2024-11-21T11:32:08,510 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/hbase/meta/1588230740/info/d8751b43fe3e48428bece7d9d1264896, entries=10, sequenceid=11, filesize=7.1 K 2024-11-21T11:32:08,511 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/hbase/meta/1588230740/.tmp/ns/c7584871af9a4da6b29e77223f3f1dac as hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/hbase/meta/1588230740/ns/c7584871af9a4da6b29e77223f3f1dac 2024-11-21T11:32:08,515 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/hbase/meta/1588230740/ns/c7584871af9a4da6b29e77223f3f1dac, entries=2, sequenceid=11, filesize=5.0 K 2024-11-21T11:32:08,516 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/hbase/meta/1588230740/.tmp/table/58d19ac35e8a439c9ab6cc27a3f6b649 as hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/hbase/meta/1588230740/table/58d19ac35e8a439c9ab6cc27a3f6b649 2024-11-21T11:32:08,520 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/hbase/meta/1588230740/table/58d19ac35e8a439c9ab6cc27a3f6b649, entries=2, sequenceid=11, filesize=5.4 K 2024-11-21T11:32:08,521 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 94ms, sequenceid=11, compaction requested=false 2024-11-21T11:32:08,526 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-21T11:32:08,526 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T11:32:08,527 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T11:32:08,527 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732188728427Running coprocessor pre-close hooks at 1732188728427Disabling compacts and flushes for region at 1732188728427Disabling writes for close at 1732188728427Obtaining lock to block concurrent updates at 1732188728427Preparing flush snapshotting stores in 1588230740 at 1732188728427Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1732188728428 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732188728428Flushing 1588230740/info: creating writer at 1732188728428Flushing 1588230740/info: appending metadata at 1732188728444 (+16 ms)Flushing 1588230740/info: closing flushed file at 1732188728444Flushing 1588230740/ns: creating writer at 1732188728457 (+13 ms)Flushing 1588230740/ns: appending metadata at 1732188728472 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1732188728472Flushing 1588230740/table: creating writer at 1732188728482 (+10 ms)Flushing 1588230740/table: appending metadata at 1732188728495 (+13 ms)Flushing 1588230740/table: closing flushed file at 1732188728495Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2ce23441: reopening flushed file at 1732188728505 (+10 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@597544ba: reopening flushed file at 1732188728510 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5b066c33: reopening flushed file at 1732188728515 (+5 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 94ms, sequenceid=11, compaction requested=false at 1732188728521 (+6 ms)Writing region close event to WAL at 1732188728523 (+2 ms)Running coprocessor post-close hooks at 1732188728526 (+3 ms)Closed at 1732188728526 2024-11-21T11:32:08,527 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T11:32:08,627 INFO [RS:0;7b462513bfc2:46293 {}] regionserver.HRegionServer(976): stopping server 7b462513bfc2,46293,1732188677179; all regions closed. 2024-11-21T11:32:08,628 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:08,628 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:08,628 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:08,628 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:08,628 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:08,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741834_1010 (size=3306) 2024-11-21T11:32:08,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741834_1010 (size=3306) 2024-11-21T11:32:08,633 DEBUG [RS:0;7b462513bfc2:46293 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/oldWALs 2024-11-21T11:32:08,633 INFO [RS:0;7b462513bfc2:46293 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7b462513bfc2%2C46293%2C1732188677179.meta:.meta(num 1732188677946) 2024-11-21T11:32:08,633 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:08,633 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:08,633 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:08,633 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:08,633 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:08,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741844_1020 (size=1252) 2024-11-21T11:32:08,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741844_1020 (size=1252) 2024-11-21T11:32:08,638 DEBUG [RS:0;7b462513bfc2:46293 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/oldWALs 2024-11-21T11:32:08,638 INFO [RS:0;7b462513bfc2:46293 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7b462513bfc2%2C46293%2C1732188677179:(num 1732188728412) 2024-11-21T11:32:08,638 DEBUG [RS:0;7b462513bfc2:46293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:32:08,638 INFO [RS:0;7b462513bfc2:46293 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T11:32:08,638 INFO [RS:0;7b462513bfc2:46293 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T11:32:08,639 INFO [RS:0;7b462513bfc2:46293 {}] hbase.ChoreService(370): Chore service for: regionserver/7b462513bfc2:0 had 
[ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-21T11:32:08,639 INFO [RS:0;7b462513bfc2:46293 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T11:32:08,639 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T11:32:08,639 INFO [RS:0;7b462513bfc2:46293 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46293 2024-11-21T11:32:08,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46293-0x1013a4b8e910001, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7b462513bfc2,46293,1732188677179 2024-11-21T11:32:08,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-21T11:32:08,641 INFO [RS:0;7b462513bfc2:46293 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T11:32:08,642 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7b462513bfc2,46293,1732188677179] 2024-11-21T11:32:08,643 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7b462513bfc2,46293,1732188677179 already deleted, retry=false 2024-11-21T11:32:08,643 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7b462513bfc2,46293,1732188677179 expired; onlineServers=0 2024-11-21T11:32:08,643 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7b462513bfc2,33889,1732188677135' ***** 2024-11-21T11:32:08,643 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T11:32:08,643 INFO [M:0;7b462513bfc2:33889 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T11:32:08,644 INFO [M:0;7b462513bfc2:33889 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T11:32:08,644 DEBUG [M:0;7b462513bfc2:33889 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T11:32:08,644 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-21T11:32:08,644 DEBUG [M:0;7b462513bfc2:33889 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T11:32:08,644 DEBUG [master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.large.0-1732188677345 {}] cleaner.HFileCleaner(306): Exit Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.large.0-1732188677345,5,FailOnTimeoutGroup] 2024-11-21T11:32:08,644 DEBUG [master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.small.0-1732188677345 {}] cleaner.HFileCleaner(306): Exit Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.small.0-1732188677345,5,FailOnTimeoutGroup] 2024-11-21T11:32:08,644 INFO [M:0;7b462513bfc2:33889 {}] hbase.ChoreService(370): Chore service for: master/7b462513bfc2:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T11:32:08,644 INFO [M:0;7b462513bfc2:33889 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T11:32:08,644 DEBUG [M:0;7b462513bfc2:33889 {}] master.HMaster(1795): Stopping service threads 2024-11-21T11:32:08,644 INFO [M:0;7b462513bfc2:33889 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T11:32:08,644 INFO [M:0;7b462513bfc2:33889 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T11:32:08,644 INFO [M:0;7b462513bfc2:33889 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T11:32:08,644 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-21T11:32:08,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-21T11:32:08,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:08,646 DEBUG [M:0;7b462513bfc2:33889 {}] zookeeper.ZKUtil(347): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-21T11:32:08,646 WARN [M:0;7b462513bfc2:33889 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-21T11:32:08,646 INFO [M:0;7b462513bfc2:33889 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/.lastflushedseqids 2024-11-21T11:32:08,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741849_1025 (size=130) 2024-11-21T11:32:08,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741849_1025 (size=130) 2024-11-21T11:32:08,652 INFO [M:0;7b462513bfc2:33889 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T11:32:08,652 INFO [M:0;7b462513bfc2:33889 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T11:32:08,652 DEBUG [M:0;7b462513bfc2:33889 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T11:32:08,653 INFO [M:0;7b462513bfc2:33889 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:32:08,653 DEBUG [M:0;7b462513bfc2:33889 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:32:08,653 DEBUG [M:0;7b462513bfc2:33889 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T11:32:08,653 DEBUG [M:0;7b462513bfc2:33889 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:32:08,653 INFO [M:0;7b462513bfc2:33889 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.59 KB heapSize=55 KB 2024-11-21T11:32:08,668 DEBUG [M:0;7b462513bfc2:33889 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e99fbc94e91b4005a2cab9c07a3e264f is 82, key is hbase:meta,,1/info:regioninfo/1732188677973/Put/seqid=0 2024-11-21T11:32:08,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741850_1026 (size=5672) 2024-11-21T11:32:08,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741850_1026 (size=5672) 2024-11-21T11:32:08,674 INFO [M:0;7b462513bfc2:33889 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e99fbc94e91b4005a2cab9c07a3e264f 2024-11-21T11:32:08,693 DEBUG [M:0;7b462513bfc2:33889 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8bac05cd119049588ed0ca1a6ad4f6b2 is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732188678481/Put/seqid=0 2024-11-21T11:32:08,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741851_1027 (size=7823) 2024-11-21T11:32:08,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741851_1027 (size=7823) 2024-11-21T11:32:08,698 INFO [M:0;7b462513bfc2:33889 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.99 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8bac05cd119049588ed0ca1a6ad4f6b2 2024-11-21T11:32:08,702 INFO [M:0;7b462513bfc2:33889 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8bac05cd119049588ed0ca1a6ad4f6b2 2024-11-21T11:32:08,717 DEBUG [M:0;7b462513bfc2:33889 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/00194fb146874bde9d512793c453473e is 69, key is 7b462513bfc2,46293,1732188677179/rs:state/1732188677413/Put/seqid=0 2024-11-21T11:32:08,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741852_1028 (size=5156) 2024-11-21T11:32:08,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741852_1028 (size=5156) 2024-11-21T11:32:08,722 INFO [M:0;7b462513bfc2:33889 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/00194fb146874bde9d512793c453473e 2024-11-21T11:32:08,741 DEBUG [M:0;7b462513bfc2:33889 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c15bf60489874de4aa6d4a5e424ccc16 is 52, key is load_balancer_on/state:d/1732188678104/Put/seqid=0 2024-11-21T11:32:08,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46293-0x1013a4b8e910001, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:32:08,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46293-0x1013a4b8e910001, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:32:08,742 INFO [RS:0;7b462513bfc2:46293 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T11:32:08,742 INFO [RS:0;7b462513bfc2:46293 {}] regionserver.HRegionServer(1031): Exiting; stopping=7b462513bfc2,46293,1732188677179; zookeeper connection closed. 
2024-11-21T11:32:08,743 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@54be4335 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@54be4335 2024-11-21T11:32:08,743 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T11:32:08,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741853_1029 (size=5056) 2024-11-21T11:32:08,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741853_1029 (size=5056) 2024-11-21T11:32:08,747 INFO [M:0;7b462513bfc2:33889 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c15bf60489874de4aa6d4a5e424ccc16 2024-11-21T11:32:08,752 DEBUG [M:0;7b462513bfc2:33889 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e99fbc94e91b4005a2cab9c07a3e264f as hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e99fbc94e91b4005a2cab9c07a3e264f 2024-11-21T11:32:08,758 INFO [M:0;7b462513bfc2:33889 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e99fbc94e91b4005a2cab9c07a3e264f, entries=8, sequenceid=121, filesize=5.5 K 2024-11-21T11:32:08,759 DEBUG [M:0;7b462513bfc2:33889 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8bac05cd119049588ed0ca1a6ad4f6b2 as hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8bac05cd119049588ed0ca1a6ad4f6b2 2024-11-21T11:32:08,764 INFO [M:0;7b462513bfc2:33889 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8bac05cd119049588ed0ca1a6ad4f6b2 2024-11-21T11:32:08,764 INFO [M:0;7b462513bfc2:33889 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8bac05cd119049588ed0ca1a6ad4f6b2, entries=14, sequenceid=121, filesize=7.6 K 2024-11-21T11:32:08,765 DEBUG [M:0;7b462513bfc2:33889 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/00194fb146874bde9d512793c453473e as hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/00194fb146874bde9d512793c453473e 2024-11-21T11:32:08,769 INFO [M:0;7b462513bfc2:33889 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/00194fb146874bde9d512793c453473e, entries=1, sequenceid=121, filesize=5.0 K 2024-11-21T11:32:08,770 DEBUG [M:0;7b462513bfc2:33889 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c15bf60489874de4aa6d4a5e424ccc16 as hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c15bf60489874de4aa6d4a5e424ccc16 2024-11-21T11:32:08,774 INFO [M:0;7b462513bfc2:33889 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43993/user/jenkins/test-data/73957301-df02-5840-4f93-666020f79ef2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c15bf60489874de4aa6d4a5e424ccc16, entries=1, sequenceid=121, filesize=4.9 K 2024-11-21T11:32:08,775 INFO [M:0;7b462513bfc2:33889 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.59 KB/44638, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 122ms, sequenceid=121, compaction requested=false 2024-11-21T11:32:08,777 INFO [M:0;7b462513bfc2:33889 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:32:08,777 DEBUG [M:0;7b462513bfc2:33889 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732188728652Disabling compacts and flushes for region at 1732188728652Disabling writes for close at 1732188728653 (+1 ms)Obtaining lock to block concurrent updates at 1732188728653Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732188728653Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44638, getHeapSize=56256, getOffHeapSize=0, getCellsCount=140 at 1732188728653Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732188728654 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732188728654Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732188728668 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732188728668Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732188728678 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732188728693 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732188728693Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732188728703 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732188728716 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732188728716Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732188728727 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732188728741 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732188728741Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1b5766c0: reopening flushed file at 1732188728751 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56c3853b: reopening flushed file at 1732188728758 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3c4b2587: reopening flushed file at 1732188728764 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@558b6403: reopening flushed file at 1732188728769 (+5 ms)Finished flush of dataSize ~43.59 KB/44638, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 122ms, sequenceid=121, compaction requested=false at 1732188728775 (+6 ms)Writing region close event to WAL at 1732188728777 (+2 ms)Closed at 1732188728777 2024-11-21T11:32:08,777 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:08,777 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:08,777 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:08,777 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:08,777 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:08,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40039 is added to blk_1073741830_1006 (size=53035) 2024-11-21T11:32:08,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42157 is added to blk_1073741830_1006 (size=53035) 2024-11-21T11:32:08,780 INFO [M:0;7b462513bfc2:33889 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-21T11:32:08,780 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-21T11:32:08,780 INFO [M:0;7b462513bfc2:33889 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33889 2024-11-21T11:32:08,780 INFO [M:0;7b462513bfc2:33889 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T11:32:08,882 INFO [M:0;7b462513bfc2:33889 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T11:32:08,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:32:08,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33889-0x1013a4b8e910000, quorum=127.0.0.1:63013, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:32:08,884 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@206f042f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:32:08,885 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@374dfdaf{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:32:08,885 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:32:08,885 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1367dc96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:32:08,885 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@df163d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/hadoop.log.dir/,STOPPED} 2024-11-21T11:32:08,887 WARN [BP-1453964250-172.17.0.2-1732188676372 heartbeating to localhost/127.0.0.1:43993 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T11:32:08,887 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T11:32:08,887 WARN [BP-1453964250-172.17.0.2-1732188676372 heartbeating to localhost/127.0.0.1:43993 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1453964250-172.17.0.2-1732188676372 (Datanode Uuid 2eaf071e-39fb-4282-afd5-45759fd28ad5) service to localhost/127.0.0.1:43993 2024-11-21T11:32:08,887 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T11:32:08,887 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/cluster_73d46690-d356-909e-21c4-af3bb94aa751/data/data3/current/BP-1453964250-172.17.0.2-1732188676372 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:32:08,887 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/cluster_73d46690-d356-909e-21c4-af3bb94aa751/data/data4/current/BP-1453964250-172.17.0.2-1732188676372 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:32:08,888 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T11:32:08,889 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1fa04e54{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:32:08,889 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@68744dfe{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:32:08,890 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:32:08,890 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b93cee9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:32:08,890 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5838a3fd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/hadoop.log.dir/,STOPPED} 2024-11-21T11:32:08,891 WARN [BP-1453964250-172.17.0.2-1732188676372 heartbeating to localhost/127.0.0.1:43993 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T11:32:08,891 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T11:32:08,891 WARN [BP-1453964250-172.17.0.2-1732188676372 heartbeating to localhost/127.0.0.1:43993 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1453964250-172.17.0.2-1732188676372 (Datanode Uuid 189ec7dc-c409-4486-8836-5ac4e5bdd230) service to localhost/127.0.0.1:43993 2024-11-21T11:32:08,891 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T11:32:08,892 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/cluster_73d46690-d356-909e-21c4-af3bb94aa751/data/data1/current/BP-1453964250-172.17.0.2-1732188676372 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:32:08,892 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/cluster_73d46690-d356-909e-21c4-af3bb94aa751/data/data2/current/BP-1453964250-172.17.0.2-1732188676372 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:32:08,892 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T11:32:08,898 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7e11e1e0{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T11:32:08,898 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1159c3f2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:32:08,898 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:32:08,899 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@294894f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:32:08,899 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d6dc4f9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/hadoop.log.dir/,STOPPED} 2024-11-21T11:32:08,905 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-21T11:32:08,921 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-21T11:32:08,930 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=206 (was 180) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:43993 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43993 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43993 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:43993 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:43993 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43993 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/7b462513bfc2:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:43993 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43993 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 455) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=63 (was 131), ProcessCount=11 (was 11), AvailableMemoryMB=4966 (was 4988) 2024-11-21T11:32:08,938 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=206, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=63, ProcessCount=11, AvailableMemoryMB=4966 2024-11-21T11:32:08,938 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T11:32:08,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/hadoop.log.dir so I do NOT create it in target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58 2024-11-21T11:32:08,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7f0b516f-5632-4722-7c74-b0386ed98be6/hadoop.tmp.dir so I do NOT create it in target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58 2024-11-21T11:32:08,939 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/cluster_76847933-9f96-1e47-8911-efafed2418cd, deleteOnExit=true 2024-11-21T11:32:08,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T11:32:08,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/test.cache.data in system properties and HBase conf 2024-11-21T11:32:08,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T11:32:08,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/hadoop.log.dir in system properties and HBase conf 2024-11-21T11:32:08,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T11:32:08,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-21T11:32:08,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T11:32:08,939 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-21T11:32:08,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T11:32:08,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T11:32:08,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T11:32:08,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T11:32:08,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T11:32:08,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T11:32:08,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T11:32:08,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T11:32:08,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T11:32:08,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/nfs.dump.dir in system properties and HBase conf 2024-11-21T11:32:08,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/java.io.tmpdir in system properties and HBase conf 2024-11-21T11:32:08,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T11:32:08,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T11:32:08,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T11:32:08,953 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-21T11:32:09,009 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:32:09,012 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T11:32:09,013 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T11:32:09,013 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T11:32:09,013 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T11:32:09,014 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:32:09,014 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e8771e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/hadoop.log.dir/,AVAILABLE} 2024-11-21T11:32:09,015 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8987cea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T11:32:09,082 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:09,082 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:09,131 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@35e03861{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/java.io.tmpdir/jetty-localhost-38867-hadoop-hdfs-3_4_1-tests_jar-_-any-12670125870645388625/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T11:32:09,132 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7142f9c7{HTTP/1.1, (http/1.1)}{localhost:38867} 2024-11-21T11:32:09,132 INFO [Time-limited test {}] server.Server(415): Started @236324ms 2024-11-21T11:32:09,147 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-21T11:32:09,201 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:32:09,204 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T11:32:09,205 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T11:32:09,205 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T11:32:09,205 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T11:32:09,205 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b395bdc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/hadoop.log.dir/,AVAILABLE} 2024-11-21T11:32:09,206 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@73447fd2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T11:32:09,319 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@34e466bb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/java.io.tmpdir/jetty-localhost-39113-hadoop-hdfs-3_4_1-tests_jar-_-any-8426549933827440968/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:32:09,320 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4181d37d{HTTP/1.1, (http/1.1)}{localhost:39113} 2024-11-21T11:32:09,320 INFO [Time-limited test {}] server.Server(415): Started @236512ms 2024-11-21T11:32:09,321 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T11:32:09,350 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:32:09,353 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T11:32:09,354 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T11:32:09,354 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T11:32:09,354 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T11:32:09,354 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@671b15e6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/hadoop.log.dir/,AVAILABLE} 2024-11-21T11:32:09,354 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54fcac{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T11:32:09,426 INFO [regionserver/7b462513bfc2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T11:32:09,448 WARN [Thread-1949 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/cluster_76847933-9f96-1e47-8911-efafed2418cd/data/data1/current/BP-1960743717-172.17.0.2-1732188728960/current, will proceed with Du for space computation calculation, 2024-11-21T11:32:09,448 WARN [Thread-1950 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/cluster_76847933-9f96-1e47-8911-efafed2418cd/data/data2/current/BP-1960743717-172.17.0.2-1732188728960/current, will proceed with Du for space computation calculation, 2024-11-21T11:32:09,465 WARN [Thread-1928 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-21T11:32:09,468 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9cb9c7f78a20ed0f with lease ID 0x69583373dd429060: Processing first storage report for DS-f2830c74-b8d0-41eb-a874-8222c7e2af3e from datanode DatanodeRegistration(127.0.0.1:37673, datanodeUuid=a157e785-e79c-4ae9-a358-082b586e2edd, infoPort=45177, infoSecurePort=0, ipcPort=44711, storageInfo=lv=-57;cid=testClusterID;nsid=1878654910;c=1732188728960) 2024-11-21T11:32:09,468 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9cb9c7f78a20ed0f with lease ID 0x69583373dd429060: from storage DS-f2830c74-b8d0-41eb-a874-8222c7e2af3e node DatanodeRegistration(127.0.0.1:37673, datanodeUuid=a157e785-e79c-4ae9-a358-082b586e2edd, infoPort=45177, infoSecurePort=0, ipcPort=44711, storageInfo=lv=-57;cid=testClusterID;nsid=1878654910;c=1732188728960), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:32:09,468 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9cb9c7f78a20ed0f with lease ID 0x69583373dd429060: Processing first storage report for DS-ce73ce82-e136-40f1-b038-12f11a311fc4 from datanode DatanodeRegistration(127.0.0.1:37673, datanodeUuid=a157e785-e79c-4ae9-a358-082b586e2edd, infoPort=45177, infoSecurePort=0, ipcPort=44711, storageInfo=lv=-57;cid=testClusterID;nsid=1878654910;c=1732188728960) 2024-11-21T11:32:09,468 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9cb9c7f78a20ed0f with lease ID 0x69583373dd429060: from storage DS-ce73ce82-e136-40f1-b038-12f11a311fc4 node DatanodeRegistration(127.0.0.1:37673, datanodeUuid=a157e785-e79c-4ae9-a358-082b586e2edd, infoPort=45177, infoSecurePort=0, ipcPort=44711, storageInfo=lv=-57;cid=testClusterID;nsid=1878654910;c=1732188728960), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:32:09,490 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5cfa2328{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/java.io.tmpdir/jetty-localhost-43503-hadoop-hdfs-3_4_1-tests_jar-_-any-17447073556971840219/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:32:09,490 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@551592b1{HTTP/1.1, (http/1.1)}{localhost:43503} 2024-11-21T11:32:09,491 INFO [Time-limited test {}] server.Server(415): Started @236683ms 2024-11-21T11:32:09,492 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-21T11:32:09,596 WARN [Thread-1975 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/cluster_76847933-9f96-1e47-8911-efafed2418cd/data/data3/current/BP-1960743717-172.17.0.2-1732188728960/current, will proceed with Du for space computation calculation, 2024-11-21T11:32:09,596 WARN [Thread-1976 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/cluster_76847933-9f96-1e47-8911-efafed2418cd/data/data4/current/BP-1960743717-172.17.0.2-1732188728960/current, will proceed with Du for space computation calculation, 2024-11-21T11:32:09,612 WARN [Thread-1964 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T11:32:09,614 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6e5960eceaefed38 with lease ID 0x69583373dd429061: Processing first storage report for DS-5fe9d635-5e62-4b75-a3fd-53e9d036e4ee from datanode DatanodeRegistration(127.0.0.1:34627, datanodeUuid=8229ecd5-60eb-4920-9abf-c1019806bb2a, infoPort=41719, infoSecurePort=0, ipcPort=45445, storageInfo=lv=-57;cid=testClusterID;nsid=1878654910;c=1732188728960) 2024-11-21T11:32:09,614 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6e5960eceaefed38 with lease ID 0x69583373dd429061: from storage DS-5fe9d635-5e62-4b75-a3fd-53e9d036e4ee node DatanodeRegistration(127.0.0.1:34627, datanodeUuid=8229ecd5-60eb-4920-9abf-c1019806bb2a, infoPort=41719, infoSecurePort=0, ipcPort=45445, storageInfo=lv=-57;cid=testClusterID;nsid=1878654910;c=1732188728960), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:32:09,614 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6e5960eceaefed38 with lease ID 0x69583373dd429061: Processing first storage report for DS-27126155-9c64-4faa-8851-32f6b36ea276 from datanode DatanodeRegistration(127.0.0.1:34627, datanodeUuid=8229ecd5-60eb-4920-9abf-c1019806bb2a, infoPort=41719, infoSecurePort=0, ipcPort=45445, storageInfo=lv=-57;cid=testClusterID;nsid=1878654910;c=1732188728960) 2024-11-21T11:32:09,614 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6e5960eceaefed38 with lease ID 0x69583373dd429061: from storage DS-27126155-9c64-4faa-8851-32f6b36ea276 node DatanodeRegistration(127.0.0.1:34627, datanodeUuid=8229ecd5-60eb-4920-9abf-c1019806bb2a, infoPort=41719, infoSecurePort=0, ipcPort=45445, storageInfo=lv=-57;cid=testClusterID;nsid=1878654910;c=1732188728960), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:32:09,715 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58 2024-11-21T11:32:09,718 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/cluster_76847933-9f96-1e47-8911-efafed2418cd/zookeeper_0, clientPort=49560, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/cluster_76847933-9f96-1e47-8911-efafed2418cd/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/cluster_76847933-9f96-1e47-8911-efafed2418cd/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-21T11:32:09,718 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49560 2024-11-21T11:32:09,719 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:32:09,720 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:32:09,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741825_1001 (size=7) 2024-11-21T11:32:09,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741825_1001 (size=7) 2024-11-21T11:32:09,730 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41 with version=8 2024-11-21T11:32:09,730 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/hbase-staging 2024-11-21T11:32:09,732 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7b462513bfc2:0 server-side Connection retries=45 2024-11-21T11:32:09,732 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T11:32:09,732 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T11:32:09,732 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T11:32:09,732 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T11:32:09,732 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T11:32:09,732 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T11:32:09,732 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T11:32:09,733 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44595 2024-11-21T11:32:09,734 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44595 connecting to ZooKeeper ensemble=127.0.0.1:49560 2024-11-21T11:32:09,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:445950x0, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T11:32:09,742 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44595-0x1013a4c5c050000 connected 2024-11-21T11:32:09,763 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:32:09,765 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:32:09,766 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:32:09,767 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41, hbase.cluster.distributed=false 2024-11-21T11:32:09,768 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-21T11:32:09,768 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44595 2024-11-21T11:32:09,769 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44595 2024-11-21T11:32:09,769 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44595 2024-11-21T11:32:09,769 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44595 2024-11-21T11:32:09,769 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44595 2024-11-21T11:32:09,784 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7b462513bfc2:0 server-side Connection retries=45 2024-11-21T11:32:09,784 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T11:32:09,784 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T11:32:09,784 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T11:32:09,784 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T11:32:09,784 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T11:32:09,784 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T11:32:09,785 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T11:32:09,785 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44605 2024-11-21T11:32:09,786 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44605 connecting to ZooKeeper ensemble=127.0.0.1:49560 2024-11-21T11:32:09,787 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:32:09,788 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:32:09,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:446050x0, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T11:32:09,792 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44605-0x1013a4c5c050001 connected 2024-11-21T11:32:09,792 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44605-0x1013a4c5c050001, quorum=127.0.0.1:49560, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:32:09,792 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T11:32:09,793 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T11:32:09,793 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44605-0x1013a4c5c050001, quorum=127.0.0.1:49560, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-21T11:32:09,794 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44605-0x1013a4c5c050001, quorum=127.0.0.1:49560, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-21T11:32:09,795 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44605 2024-11-21T11:32:09,795 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44605 2024-11-21T11:32:09,795 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44605 2024-11-21T11:32:09,795 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44605 2024-11-21T11:32:09,795 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44605 
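The entries above show the master (port 44595) and the regionserver (port 44605) each instantiating their RPC executors: every named call queue (default.FPBQ.Fifo, priority.RWQ.Fifo, replication.FPBQ.Fifo, metaPriority.FPBQ.Fifo) is a bounded LinkedBlockingQueue drained by a fixed number of handler threads. A minimal sketch of that bounded-queue/handler-pool pattern follows; it is illustrative only, not the HBase RpcExecutor source, with maxQueueLength=30 and handlerCount=3 copied from the log.

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    public class HandlerPoolSketch {
        // Bounded call queue, mirroring maxQueueLength=30 from the log.
        private final BlockingQueue<Runnable> callQueue = new LinkedBlockingQueue<>(30);

        // Start handlerCount worker threads that drain the queue (handlerCount=3 in the log).
        public void start(int handlerCount, String threadPrefix) {
            for (int i = 0; i < handlerCount; i++) {
                Thread t = new Thread(() -> {
                    try {
                        while (!Thread.currentThread().isInterrupted()) {
                            callQueue.take().run();   // block until a call arrives
                        }
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    }
                }, threadPrefix + ".handler=" + i);
                t.setDaemon(true);
                t.start();
            }
        }

        // offer() returns false when the queue is full, as a bounded call queue would.
        public boolean dispatch(Runnable call) {
            return callQueue.offer(call);
        }
    }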
2024-11-21T11:32:09,807 DEBUG [M:0;7b462513bfc2:44595 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7b462513bfc2:44595 2024-11-21T11:32:09,807 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7b462513bfc2,44595,1732188729732 2024-11-21T11:32:09,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x1013a4c5c050001, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T11:32:09,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T11:32:09,809 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7b462513bfc2,44595,1732188729732 2024-11-21T11:32:09,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x1013a4c5c050001, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-21T11:32:09,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:09,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x1013a4c5c050001, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:09,810 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-21T11:32:09,811 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7b462513bfc2,44595,1732188729732 from backup master directory 2024-11-21T11:32:09,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7b462513bfc2,44595,1732188729732 2024-11-21T11:32:09,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x1013a4c5c050001, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T11:32:09,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T11:32:09,813 WARN [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
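Here the master registers an ephemeral znode under /hbase/backup-masters, and both ZooKeeper sessions receive NodeChildrenChanged/NodeCreated/NodeDeleted events as znodes appear and disappear. A small sketch of the same idea with the plain Apache ZooKeeper client, assuming the connect string 127.0.0.1:49560 from the log; the znode paths and host name are placeholders and the parent znodes are assumed to exist.

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkRegisterSketch {
        public static void main(String[] args) throws Exception {
            // Print events, analogous to the "Received ZooKeeper Event" watcher lines above.
            Watcher watcher = event ->
                System.out.println("event type=" + event.getType() + " path=" + event.getPath());

            ZooKeeper zk = new ZooKeeper("127.0.0.1:49560", 40000, watcher);

            // Ephemeral registration znode: removed automatically when the session ends.
            zk.create("/hbase/backup-masters/example-host,44595",
                      new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

            // Watch a znode that may not exist yet (cf. "Set watcher on znode that does not yet exist").
            zk.exists("/hbase/running", true);

            Thread.sleep(1000);
            zk.close();
        }
    }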
2024-11-21T11:32:09,813 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7b462513bfc2,44595,1732188729732 2024-11-21T11:32:09,817 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/hbase.id] with ID: dc7a0a56-4003-42c7-b38d-adf4cecbc3c6 2024-11-21T11:32:09,817 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/.tmp/hbase.id 2024-11-21T11:32:09,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741826_1002 (size=42) 2024-11-21T11:32:09,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741826_1002 (size=42) 2024-11-21T11:32:09,823 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/.tmp/hbase.id]:[hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/hbase.id] 2024-11-21T11:32:09,834 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:32:09,834 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T11:32:09,835 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
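The cluster ID is published by writing hbase.id to a temporary path under .tmp and then moving it into place, so a reader never observes a half-written file. A short sketch of that write-then-rename pattern with the Hadoop FileSystem API; the paths are shortened placeholders and this is the general pattern rather than the FSUtils implementation.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class PublishFileSketch {
        public static void publish(Configuration conf, String clusterId) throws IOException {
            FileSystem fs = FileSystem.get(conf);
            Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");  // temporary location
            Path dst = new Path("/user/jenkins/test-data/hbase.id");       // final location

            // Write the complete contents to the temporary file first.
            try (FSDataOutputStream out = fs.create(tmp, true)) {
                out.write(clusterId.getBytes(StandardCharsets.UTF_8));
            }
            // The rename is the publish step: readers only ever see the finished file.
            if (!fs.rename(tmp, dst)) {
                throw new IOException("rename failed: " + tmp + " -> " + dst);
            }
        }
    }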
2024-11-21T11:32:09,837 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:09,837 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x1013a4c5c050001, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:09,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741827_1003 (size=196) 2024-11-21T11:32:09,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741827_1003 (size=196) 2024-11-21T11:32:09,844 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T11:32:09,845 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T11:32:09,845 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-21T11:32:09,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741828_1004 (size=1189) 2024-11-21T11:32:09,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741828_1004 (size=1189) 2024-11-21T11:32:09,852 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/data/master/store 2024-11-21T11:32:09,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741829_1005 (size=34) 2024-11-21T11:32:09,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741829_1005 (size=34) 2024-11-21T11:32:09,858 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:32:09,858 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T11:32:09,858 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:32:09,858 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:32:09,858 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T11:32:09,858 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:32:09,858 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
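The master:store descriptor above lists four column families (info, proc, rs, state), each with its own VERSIONS, BLOOMFILTER, IN_MEMORY, DATA_BLOCK_ENCODING and BLOCKSIZE settings. As an illustration of how such a descriptor is expressed with the public HBase client API, a sketch follows; the table name is hypothetical and only the 'info' family settings are copied from the log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
        public static TableDescriptor build() {
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_store"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)                                     // VERSIONS => '3'
                    .setInMemory(true)                                     // IN_MEMORY => 'true'
                    .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
                    .setBlocksize(8192)                                    // BLOCKSIZE => 8 KB
                    .build())
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc")) // remaining families use defaults here
                .build();
        }
    }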
2024-11-21T11:32:09,859 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732188729858Disabling compacts and flushes for region at 1732188729858Disabling writes for close at 1732188729858Writing region close event to WAL at 1732188729858Closed at 1732188729858 2024-11-21T11:32:09,859 WARN [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/data/master/store/.initializing 2024-11-21T11:32:09,859 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/WALs/7b462513bfc2,44595,1732188729732 2024-11-21T11:32:09,862 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b462513bfc2%2C44595%2C1732188729732, suffix=, logDir=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/WALs/7b462513bfc2,44595,1732188729732, archiveDir=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/oldWALs, maxLogs=10 2024-11-21T11:32:09,862 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C44595%2C1732188729732.1732188729862 2024-11-21T11:32:09,868 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/WALs/7b462513bfc2,44595,1732188729732/7b462513bfc2%2C44595%2C1732188729732.1732188729862 2024-11-21T11:32:09,869 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45177:45177),(127.0.0.1/127.0.0.1:41719:41719)] 2024-11-21T11:32:09,869 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T11:32:09,870 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:32:09,870 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:32:09,870 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:32:09,871 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:32:09,872 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T11:32:09,872 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:09,872 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:32:09,872 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:32:09,873 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-21T11:32:09,873 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:09,874 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:32:09,874 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:32:09,875 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T11:32:09,875 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:09,875 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:32:09,875 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:32:09,876 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T11:32:09,876 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:09,876 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:32:09,876 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:32:09,877 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:32:09,877 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:32:09,878 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:32:09,878 DEBUG [master/7b462513bfc2:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:32:09,879 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T11:32:09,879 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:32:09,881 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T11:32:09,881 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=775193, jitterRate=-0.01429225504398346}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T11:32:09,882 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732188729870Initializing all the Stores at 1732188729870Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188729870Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188729871 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188729871Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188729871Cleaning up temporary data from old regions at 1732188729878 (+7 ms)Region opened successfully at 1732188729882 (+4 ms) 2024-11-21T11:32:09,882 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T11:32:09,885 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bb60978, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7b462513bfc2/172.17.0.2:0 2024-11-21T11:32:09,885 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-21T11:32:09,885 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T11:32:09,886 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T11:32:09,886 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T11:32:09,886 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-21T11:32:09,886 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T11:32:09,886 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T11:32:09,888 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-21T11:32:09,889 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-21T11:32:09,890 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-21T11:32:09,890 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T11:32:09,891 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-21T11:32:09,892 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-21T11:32:09,892 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T11:32:09,893 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-21T11:32:09,895 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-21T11:32:09,895 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-21T11:32:09,896 DEBUG 
[master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-21T11:32:09,898 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T11:32:09,899 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-21T11:32:09,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x1013a4c5c050001, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-21T11:32:09,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-21T11:32:09,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x1013a4c5c050001, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:09,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:09,901 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7b462513bfc2,44595,1732188729732, sessionid=0x1013a4c5c050000, setting cluster-up flag (Was=false) 2024-11-21T11:32:09,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x1013a4c5c050001, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:09,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:09,909 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-21T11:32:09,909 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7b462513bfc2,44595,1732188729732 2024-11-21T11:32:09,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:09,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x1013a4c5c050001, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:09,917 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-21T11:32:09,917 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7b462513bfc2,44595,1732188729732 2024-11-21T11:32:09,918 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T11:32:09,920 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T11:32:09,920 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T11:32:09,920 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-21T11:32:09,921 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7b462513bfc2,44595,1732188729732 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T11:32:09,922 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:32:09,922 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:32:09,922 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:32:09,922 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:32:09,922 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7b462513bfc2:0, corePoolSize=10, maxPoolSize=10 2024-11-21T11:32:09,922 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:32:09,922 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7b462513bfc2:0, corePoolSize=2, maxPoolSize=2 2024-11-21T11:32:09,922 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7b462513bfc2:0, corePoolSize=1, 
maxPoolSize=1 2024-11-21T11:32:09,924 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732188759924 2024-11-21T11:32:09,925 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T11:32:09,925 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T11:32:09,925 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T11:32:09,925 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T11:32:09,925 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T11:32:09,925 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T11:32:09,925 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T11:32:09,925 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T11:32:09,925 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
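The LogsCleaner chore above is enabled with period=600000 ms, i.e. a task the ChoreService runs every ten minutes. A minimal sketch of that periodic-task pattern using java.util.concurrent; it illustrates the scheduling only and does not reproduce HBase's ChoreService or ScheduledChore classes.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
        public static void main(String[] args) {
            ScheduledExecutorService chorePool = Executors.newSingleThreadScheduledExecutor();

            // Run a cleanup task every 600000 ms (the LogsCleaner period from the log).
            chorePool.scheduleAtFixedRate(
                () -> System.out.println("cleaning old WALs..."),
                0, 600_000, TimeUnit.MILLISECONDS);
        }
    }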
2024-11-21T11:32:09,925 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T11:32:09,925 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T11:32:09,925 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T11:32:09,926 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T11:32:09,926 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T11:32:09,926 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:09,926 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T11:32:09,926 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.large.0-1732188729926,5,FailOnTimeoutGroup] 2024-11-21T11:32:09,927 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.small.0-1732188729927,5,FailOnTimeoutGroup] 2024-11-21T11:32:09,927 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:09,927 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T11:32:09,927 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:09,927 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:09,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741831_1007 (size=1321) 2024-11-21T11:32:09,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741831_1007 (size=1321) 2024-11-21T11:32:09,935 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T11:32:09,935 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41 2024-11-21T11:32:09,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741832_1008 (size=32) 2024-11-21T11:32:09,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741832_1008 (size=32) 2024-11-21T11:32:09,943 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:32:09,944 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T11:32:09,945 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T11:32:09,945 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:09,945 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:32:09,945 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T11:32:09,946 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T11:32:09,947 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:09,947 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:32:09,947 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T11:32:09,948 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T11:32:09,948 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:09,949 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:32:09,949 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T11:32:09,950 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T11:32:09,950 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:09,950 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:32:09,950 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T11:32:09,951 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/1588230740 2024-11-21T11:32:09,951 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/1588230740 2024-11-21T11:32:09,952 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T11:32:09,952 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T11:32:09,953 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-21T11:32:09,954 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T11:32:09,956 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T11:32:09,956 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=737804, jitterRate=-0.06183382868766785}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-21T11:32:09,957 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732188729943Initializing all the Stores at 1732188729943Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188729943Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188729944 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188729944Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188729944Cleaning up temporary data from old regions at 1732188729952 (+8 ms)Region opened successfully at 1732188729957 (+5 ms) 2024-11-21T11:32:09,957 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T11:32:09,957 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T11:32:09,957 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T11:32:09,957 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T11:32:09,957 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T11:32:09,957 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T11:32:09,958 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732188729957Disabling compacts and flushes for region at 
1732188729957Disabling writes for close at 1732188729957Writing region close event to WAL at 1732188729957Closed at 1732188729957 2024-11-21T11:32:09,959 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T11:32:09,959 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T11:32:09,959 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T11:32:09,960 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T11:32:09,961 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T11:32:09,997 INFO [RS:0;7b462513bfc2:44605 {}] regionserver.HRegionServer(746): ClusterId : dc7a0a56-4003-42c7-b38d-adf4cecbc3c6 2024-11-21T11:32:09,997 DEBUG [RS:0;7b462513bfc2:44605 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T11:32:09,999 DEBUG [RS:0;7b462513bfc2:44605 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T11:32:09,999 DEBUG [RS:0;7b462513bfc2:44605 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T11:32:10,001 DEBUG [RS:0;7b462513bfc2:44605 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T11:32:10,001 DEBUG [RS:0;7b462513bfc2:44605 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1176d93c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7b462513bfc2/172.17.0.2:0 2024-11-21T11:32:10,013 DEBUG [RS:0;7b462513bfc2:44605 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7b462513bfc2:44605 2024-11-21T11:32:10,013 INFO [RS:0;7b462513bfc2:44605 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T11:32:10,013 INFO [RS:0;7b462513bfc2:44605 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T11:32:10,013 DEBUG [RS:0;7b462513bfc2:44605 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-21T11:32:10,014 INFO [RS:0;7b462513bfc2:44605 {}] regionserver.HRegionServer(2659): reportForDuty to master=7b462513bfc2,44595,1732188729732 with port=44605, startcode=1732188729784 2024-11-21T11:32:10,014 DEBUG [RS:0;7b462513bfc2:44605 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T11:32:10,016 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57383, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T11:32:10,016 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44595 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7b462513bfc2,44605,1732188729784 2024-11-21T11:32:10,016 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44595 {}] master.ServerManager(517): Registering regionserver=7b462513bfc2,44605,1732188729784 2024-11-21T11:32:10,018 DEBUG [RS:0;7b462513bfc2:44605 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41 2024-11-21T11:32:10,018 DEBUG [RS:0;7b462513bfc2:44605 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39871 2024-11-21T11:32:10,018 DEBUG [RS:0;7b462513bfc2:44605 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T11:32:10,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-21T11:32:10,021 DEBUG [RS:0;7b462513bfc2:44605 {}] zookeeper.ZKUtil(111): regionserver:44605-0x1013a4c5c050001, quorum=127.0.0.1:49560, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7b462513bfc2,44605,1732188729784 2024-11-21T11:32:10,021 WARN [RS:0;7b462513bfc2:44605 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T11:32:10,021 INFO [RS:0;7b462513bfc2:44605 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-21T11:32:10,021 DEBUG [RS:0;7b462513bfc2:44605 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/WALs/7b462513bfc2,44605,1732188729784 2024-11-21T11:32:10,021 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7b462513bfc2,44605,1732188729784] 2024-11-21T11:32:10,024 INFO [RS:0;7b462513bfc2:44605 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T11:32:10,026 INFO [RS:0;7b462513bfc2:44605 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T11:32:10,026 INFO [RS:0;7b462513bfc2:44605 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T11:32:10,026 INFO [RS:0;7b462513bfc2:44605 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-21T11:32:10,026 INFO [RS:0;7b462513bfc2:44605 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T11:32:10,027 INFO [RS:0;7b462513bfc2:44605 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T11:32:10,027 INFO [RS:0;7b462513bfc2:44605 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:10,027 DEBUG [RS:0;7b462513bfc2:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:32:10,027 DEBUG [RS:0;7b462513bfc2:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:32:10,027 DEBUG [RS:0;7b462513bfc2:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:32:10,027 DEBUG [RS:0;7b462513bfc2:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:32:10,027 DEBUG [RS:0;7b462513bfc2:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:32:10,027 DEBUG [RS:0;7b462513bfc2:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7b462513bfc2:0, corePoolSize=2, maxPoolSize=2 2024-11-21T11:32:10,027 DEBUG [RS:0;7b462513bfc2:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:32:10,027 DEBUG [RS:0;7b462513bfc2:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:32:10,028 DEBUG [RS:0;7b462513bfc2:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:32:10,028 DEBUG [RS:0;7b462513bfc2:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:32:10,028 DEBUG [RS:0;7b462513bfc2:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:32:10,028 DEBUG [RS:0;7b462513bfc2:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:32:10,028 DEBUG [RS:0;7b462513bfc2:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7b462513bfc2:0, corePoolSize=3, maxPoolSize=3 2024-11-21T11:32:10,028 DEBUG [RS:0;7b462513bfc2:44605 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0, corePoolSize=3, maxPoolSize=3 2024-11-21T11:32:10,028 INFO [RS:0;7b462513bfc2:44605 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-21T11:32:10,028 INFO [RS:0;7b462513bfc2:44605 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:10,028 INFO [RS:0;7b462513bfc2:44605 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:10,028 INFO [RS:0;7b462513bfc2:44605 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:10,028 INFO [RS:0;7b462513bfc2:44605 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:10,028 INFO [RS:0;7b462513bfc2:44605 {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,44605,1732188729784-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T11:32:10,048 INFO [RS:0;7b462513bfc2:44605 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T11:32:10,049 INFO [RS:0;7b462513bfc2:44605 {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,44605,1732188729784-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:10,049 INFO [RS:0;7b462513bfc2:44605 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:10,049 INFO [RS:0;7b462513bfc2:44605 {}] regionserver.Replication(171): 7b462513bfc2,44605,1732188729784 started 2024-11-21T11:32:10,065 INFO [RS:0;7b462513bfc2:44605 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:10,065 INFO [RS:0;7b462513bfc2:44605 {}] regionserver.HRegionServer(1482): Serving as 7b462513bfc2,44605,1732188729784, RpcServer on 7b462513bfc2/172.17.0.2:44605, sessionid=0x1013a4c5c050001 2024-11-21T11:32:10,065 DEBUG [RS:0;7b462513bfc2:44605 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T11:32:10,065 DEBUG [RS:0;7b462513bfc2:44605 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7b462513bfc2,44605,1732188729784 2024-11-21T11:32:10,065 DEBUG [RS:0;7b462513bfc2:44605 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7b462513bfc2,44605,1732188729784' 2024-11-21T11:32:10,065 DEBUG [RS:0;7b462513bfc2:44605 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-21T11:32:10,065 DEBUG [RS:0;7b462513bfc2:44605 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-21T11:32:10,066 DEBUG [RS:0;7b462513bfc2:44605 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T11:32:10,066 DEBUG [RS:0;7b462513bfc2:44605 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T11:32:10,066 DEBUG [RS:0;7b462513bfc2:44605 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7b462513bfc2,44605,1732188729784 2024-11-21T11:32:10,066 DEBUG [RS:0;7b462513bfc2:44605 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7b462513bfc2,44605,1732188729784' 2024-11-21T11:32:10,066 DEBUG [RS:0;7b462513bfc2:44605 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-21T11:32:10,066 DEBUG 
[RS:0;7b462513bfc2:44605 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-21T11:32:10,066 DEBUG [RS:0;7b462513bfc2:44605 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T11:32:10,066 INFO [RS:0;7b462513bfc2:44605 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T11:32:10,066 INFO [RS:0;7b462513bfc2:44605 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T11:32:10,083 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:10,083 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:10,111 WARN [7b462513bfc2:44595 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-21T11:32:10,168 INFO [RS:0;7b462513bfc2:44605 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b462513bfc2%2C44605%2C1732188729784, suffix=, logDir=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/WALs/7b462513bfc2,44605,1732188729784, archiveDir=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/oldWALs, maxLogs=32 2024-11-21T11:32:10,169 INFO [RS:0;7b462513bfc2:44605 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C44605%2C1732188729784.1732188730169 2024-11-21T11:32:10,174 INFO [RS:0;7b462513bfc2:44605 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/WALs/7b462513bfc2,44605,1732188729784/7b462513bfc2%2C44605%2C1732188729784.1732188730169 2024-11-21T11:32:10,175 DEBUG [RS:0;7b462513bfc2:44605 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41719:41719),(127.0.0.1/127.0.0.1:45177:45177)] 2024-11-21T11:32:10,362 DEBUG [7b462513bfc2:44595 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T11:32:10,362 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7b462513bfc2,44605,1732188729784 2024-11-21T11:32:10,364 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7b462513bfc2,44605,1732188729784, state=OPENING 2024-11-21T11:32:10,365 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T11:32:10,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x1013a4c5c050001, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:10,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:10,367 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T11:32:10,367 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:32:10,367 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:32:10,367 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7b462513bfc2,44605,1732188729784}] 2024-11-21T11:32:10,520 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T11:32:10,522 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37739, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T11:32:10,525 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T11:32:10,525 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-21T11:32:10,527 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b462513bfc2%2C44605%2C1732188729784.meta, suffix=.meta, logDir=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/WALs/7b462513bfc2,44605,1732188729784, archiveDir=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/oldWALs, maxLogs=32 2024-11-21T11:32:10,527 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C44605%2C1732188729784.meta.1732188730527.meta 2024-11-21T11:32:10,533 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/WALs/7b462513bfc2,44605,1732188729784/7b462513bfc2%2C44605%2C1732188729784.meta.1732188730527.meta 2024-11-21T11:32:10,534 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45177:45177),(127.0.0.1/127.0.0.1:41719:41719)] 2024-11-21T11:32:10,534 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T11:32:10,535 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T11:32:10,535 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T11:32:10,535 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-21T11:32:10,535 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T11:32:10,535 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:32:10,535 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T11:32:10,535 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T11:32:10,536 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T11:32:10,537 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T11:32:10,537 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:10,538 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:32:10,538 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T11:32:10,538 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T11:32:10,539 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:10,539 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:32:10,539 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T11:32:10,540 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T11:32:10,540 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:10,540 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:32:10,540 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T11:32:10,541 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T11:32:10,541 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:10,541 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-21T11:32:10,541 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T11:32:10,542 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/1588230740 2024-11-21T11:32:10,543 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/1588230740 2024-11-21T11:32:10,544 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T11:32:10,544 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T11:32:10,544 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-21T11:32:10,545 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T11:32:10,546 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=849021, jitterRate=0.07958690822124481}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-21T11:32:10,546 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T11:32:10,547 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732188730535Writing region info on filesystem at 1732188730535Initializing all the Stores at 1732188730536 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188730536Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188730536Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188730536Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188730536Cleaning up temporary data from old regions at 1732188730544 (+8 ms)Running coprocessor post-open hooks at 1732188730546 (+2 ms)Region opened successfully at 1732188730546 2024-11-21T11:32:10,548 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732188730519 2024-11-21T11:32:10,550 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T11:32:10,550 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T11:32:10,551 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7b462513bfc2,44605,1732188729784 2024-11-21T11:32:10,552 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7b462513bfc2,44605,1732188729784, state=OPEN 2024-11-21T11:32:10,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-21T11:32:10,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x1013a4c5c050001, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-21T11:32:10,558 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7b462513bfc2,44605,1732188729784 2024-11-21T11:32:10,558 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:32:10,558 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:32:10,560 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T11:32:10,560 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7b462513bfc2,44605,1732188729784 in 191 msec 2024-11-21T11:32:10,562 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T11:32:10,562 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 601 msec 2024-11-21T11:32:10,563 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T11:32:10,563 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T11:32:10,565 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T11:32:10,565 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7b462513bfc2,44605,1732188729784, seqNum=-1] 2024-11-21T11:32:10,565 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T11:32:10,566 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34043, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T11:32:10,571 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 651 msec 2024-11-21T11:32:10,571 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732188730571, completionTime=-1 2024-11-21T11:32:10,571 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T11:32:10,571 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-21T11:32:10,573 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-21T11:32:10,573 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732188790573 2024-11-21T11:32:10,574 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732188850573 2024-11-21T11:32:10,574 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-21T11:32:10,574 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,44595,1732188729732-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:10,574 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,44595,1732188729732-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:10,574 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,44595,1732188729732-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:10,574 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7b462513bfc2:44595, period=300000, unit=MILLISECONDS is enabled. 
2024-11-21T11:32:10,574 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:10,574 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:10,576 DEBUG [master/7b462513bfc2:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T11:32:10,578 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.765sec 2024-11-21T11:32:10,578 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T11:32:10,578 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T11:32:10,578 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T11:32:10,578 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-21T11:32:10,578 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T11:32:10,578 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,44595,1732188729732-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T11:32:10,578 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,44595,1732188729732-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T11:32:10,580 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T11:32:10,580 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T11:32:10,580 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,44595,1732188729732-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-21T11:32:10,597 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1284b092, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T11:32:10,597 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7b462513bfc2,44595,-1 for getting cluster id 2024-11-21T11:32:10,598 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T11:32:10,599 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dc7a0a56-4003-42c7-b38d-adf4cecbc3c6' 2024-11-21T11:32:10,599 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T11:32:10,599 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dc7a0a56-4003-42c7-b38d-adf4cecbc3c6" 2024-11-21T11:32:10,600 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a7b5486, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T11:32:10,600 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7b462513bfc2,44595,-1] 2024-11-21T11:32:10,600 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T11:32:10,600 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:32:10,601 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51488, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T11:32:10,602 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5dea9c62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T11:32:10,602 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T11:32:10,603 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7b462513bfc2,44605,1732188729784, seqNum=-1] 2024-11-21T11:32:10,603 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T11:32:10,604 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36334, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T11:32:10,606 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7b462513bfc2,44595,1732188729732 2024-11-21T11:32:10,606 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:32:10,609 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-21T11:32:10,609 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T11:32:10,610 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 7b462513bfc2,44595,1732188729732 2024-11-21T11:32:10,610 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@492704b6 2024-11-21T11:32:10,610 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T11:32:10,611 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51490, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T11:32:10,612 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44595 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-21T11:32:10,612 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44595 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-21T11:32:10,612 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44595 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T11:32:10,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44595 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-21T11:32:10,615 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T11:32:10,615 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:10,615 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44595 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-21T11:32:10,616 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T11:32:10,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44595 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T11:32:10,623 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741835_1011 (size=381) 2024-11-21T11:32:10,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741835_1011 (size=381) 2024-11-21T11:32:10,625 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1c5b168b1cd5495e81cd9c96a67dc062, NAME => 'TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41 2024-11-21T11:32:10,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741836_1012 (size=64) 2024-11-21T11:32:10,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741836_1012 (size=64) 2024-11-21T11:32:10,632 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:32:10,632 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 1c5b168b1cd5495e81cd9c96a67dc062, disabling compactions & flushes 2024-11-21T11:32:10,633 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062. 2024-11-21T11:32:10,633 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062. 2024-11-21T11:32:10,633 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062. after waiting 0 ms 2024-11-21T11:32:10,633 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062. 2024-11-21T11:32:10,633 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062. 
2024-11-21T11:32:10,633 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1c5b168b1cd5495e81cd9c96a67dc062: Waiting for close lock at 1732188730632Disabling compacts and flushes for region at 1732188730632Disabling writes for close at 1732188730633 (+1 ms)Writing region close event to WAL at 1732188730633Closed at 1732188730633 2024-11-21T11:32:10,634 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T11:32:10,634 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732188730634"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732188730634"}]},"ts":"1732188730634"} 2024-11-21T11:32:10,637 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-21T11:32:10,638 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T11:32:10,638 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732188730638"}]},"ts":"1732188730638"} 2024-11-21T11:32:10,641 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-21T11:32:10,641 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1c5b168b1cd5495e81cd9c96a67dc062, ASSIGN}] 2024-11-21T11:32:10,642 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1c5b168b1cd5495e81cd9c96a67dc062, ASSIGN 2024-11-21T11:32:10,644 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1c5b168b1cd5495e81cd9c96a67dc062, ASSIGN; state=OFFLINE, location=7b462513bfc2,44605,1732188729784; forceNewPlan=false, retain=false 2024-11-21T11:32:10,794 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1c5b168b1cd5495e81cd9c96a67dc062, regionState=OPENING, regionLocation=7b462513bfc2,44605,1732188729784 2024-11-21T11:32:10,797 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1c5b168b1cd5495e81cd9c96a67dc062, ASSIGN because future has completed 2024-11-21T11:32:10,797 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1c5b168b1cd5495e81cd9c96a67dc062, 
server=7b462513bfc2,44605,1732188729784}] 2024-11-21T11:32:10,954 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062. 2024-11-21T11:32:10,954 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 1c5b168b1cd5495e81cd9c96a67dc062, NAME => 'TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062.', STARTKEY => '', ENDKEY => ''} 2024-11-21T11:32:10,955 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:10,955 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:32:10,955 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:10,955 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:10,956 INFO [StoreOpener-1c5b168b1cd5495e81cd9c96a67dc062-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:10,957 INFO [StoreOpener-1c5b168b1cd5495e81cd9c96a67dc062-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1c5b168b1cd5495e81cd9c96a67dc062 columnFamilyName info 2024-11-21T11:32:10,958 DEBUG [StoreOpener-1c5b168b1cd5495e81cd9c96a67dc062-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:10,958 INFO [StoreOpener-1c5b168b1cd5495e81cd9c96a67dc062-1 {}] regionserver.HStore(327): Store=1c5b168b1cd5495e81cd9c96a67dc062/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:32:10,958 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:10,959 DEBUG 
[RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:10,959 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:10,959 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:10,959 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:10,961 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:10,962 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T11:32:10,963 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 1c5b168b1cd5495e81cd9c96a67dc062; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=723253, jitterRate=-0.08033709228038788}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T11:32:10,963 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:10,963 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 1c5b168b1cd5495e81cd9c96a67dc062: Running coprocessor pre-open hook at 1732188730955Writing region info on filesystem at 1732188730955Initializing all the Stores at 1732188730956 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188730956Cleaning up temporary data from old regions at 1732188730959 (+3 ms)Running coprocessor post-open hooks at 1732188730963 (+4 ms)Region opened successfully at 1732188730963 2024-11-21T11:32:10,964 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062., pid=6, masterSystemTime=1732188730950 2024-11-21T11:32:10,966 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062. 2024-11-21T11:32:10,966 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062. 2024-11-21T11:32:10,967 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1c5b168b1cd5495e81cd9c96a67dc062, regionState=OPEN, openSeqNum=2, regionLocation=7b462513bfc2,44605,1732188729784 2024-11-21T11:32:10,969 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1c5b168b1cd5495e81cd9c96a67dc062, server=7b462513bfc2,44605,1732188729784 because future has completed 2024-11-21T11:32:10,972 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T11:32:10,972 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 1c5b168b1cd5495e81cd9c96a67dc062, server=7b462513bfc2,44605,1732188729784 in 173 msec 2024-11-21T11:32:10,975 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-21T11:32:10,975 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1c5b168b1cd5495e81cd9c96a67dc062, ASSIGN in 331 msec 2024-11-21T11:32:10,975 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T11:32:10,976 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732188730975"}]},"ts":"1732188730975"} 2024-11-21T11:32:10,977 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-21T11:32:10,978 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T11:32:10,980 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 366 msec 2024-11-21T11:32:11,084 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:11,084 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:12,084 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:12,084 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:13,085 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:13,085 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:13,462 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:13,462 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:13,462 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:13,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:13,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:13,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:13,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:13,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:13,476 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:13,476 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:13,477 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:13,477 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:13,477 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:13,477 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:13,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:13,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:13,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:13,481 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:13,985 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T11:32:13,986 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:13,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:13,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:13,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:13,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:13,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:13,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:13,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:14,011 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:14,011 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:14,011 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:14,012 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:14,012 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:14,013 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:14,018 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:14,018 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:14,019 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:14,021 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:14,085 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:14,085 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:15,086 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:32:15,086 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:16,024 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-21T11:32:16,025 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-21T11:32:16,087 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:16,087 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:17,087 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:17,087 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:18,088 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:18,088 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:32:18,293 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-21T11:32:18,293 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-21T11:32:18,294 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-21T11:32:19,088 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:19,088 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:20,089 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:20,089 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:32:20,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44595 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T11:32:20,699 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-21T11:32:20,699 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-21T11:32:20,702 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-21T11:32:20,702 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062. 2024-11-21T11:32:20,704 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062., hostname=7b462513bfc2,44605,1732188729784, seqNum=2] 2024-11-21T11:32:20,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] regionserver.HRegion(8855): Flush requested on 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:20,716 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1c5b168b1cd5495e81cd9c96a67dc062 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-21T11:32:20,735 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/7ddfa949054746708b3436b641050190 is 1080, key is row0001/info:/1732188740705/Put/seqid=0 2024-11-21T11:32:20,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741837_1013 (size=12509) 2024-11-21T11:32:20,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741837_1013 (size=12509) 2024-11-21T11:32:20,743 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/7ddfa949054746708b3436b641050190 2024-11-21T11:32:20,750 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/7ddfa949054746708b3436b641050190 as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/7ddfa949054746708b3436b641050190 2024-11-21T11:32:20,757 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/7ddfa949054746708b3436b641050190, entries=7, sequenceid=11, filesize=12.2 K 
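The repeated Close-WAL-Writer-0 warnings above come from RecoverLeaseFSUtils polling DistributedFileSystem#isFileClosed through reflection and hitting an InvocationTargetException because the underlying DFSClient has already been shut down ("Filesystem closed"); the same two WAL files are retried roughly once per second. Below is a minimal sketch of that style of reflective polling loop; the class name, timeout, and logging are invented for illustration, and this is not the actual HBase implementation.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class LeaseRecoveryPollSketch {

  // Polls FileSystem#isFileClosed reflectively until the file reports closed or the
  // deadline passes, logging and retrying on failure, as the warnings above suggest.
  public static boolean waitUntilFileClosed(FileSystem fs, Path wal, long timeoutMs)
      throws InterruptedException {
    final Method isFileClosed;
    try {
      // Looked up reflectively: isFileClosed is not part of the base FileSystem API.
      isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // this FileSystem cannot answer; a caller would fall back to other checks
    }
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        if ((Boolean) isFileClosed.invoke(fs, wal)) {
          return true;
        }
      } catch (InvocationTargetException | IllegalAccessException e) {
        // Mirrors the "Failed invocation for <wal path>" warnings above, e.g. when the
        // underlying client has already been closed ("Filesystem closed").
        System.err.println("Failed invocation for " + wal + ": " + e.getCause());
      }
      Thread.sleep(1000L); // the warnings above recur at roughly one-second intervals
    }
    return false;
  }
}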
2024-11-21T11:32:20,758 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=22.07 KB/22596 for 1c5b168b1cd5495e81cd9c96a67dc062 in 41ms, sequenceid=11, compaction requested=false 2024-11-21T11:32:20,758 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1c5b168b1cd5495e81cd9c96a67dc062: 2024-11-21T11:32:20,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] regionserver.HRegion(8855): Flush requested on 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:20,758 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1c5b168b1cd5495e81cd9c96a67dc062 1/1 column families, dataSize=23.12 KB heapSize=25 KB 2024-11-21T11:32:20,762 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/dd8a8bb6a97b422f9587b8a83f012efe is 1080, key is row0008/info:/1732188740717/Put/seqid=0 2024-11-21T11:32:20,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741838_1014 (size=28684) 2024-11-21T11:32:20,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741838_1014 (size=28684) 2024-11-21T11:32:20,768 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=23.12 KB at sequenceid=36 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/dd8a8bb6a97b422f9587b8a83f012efe 2024-11-21T11:32:20,773 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/dd8a8bb6a97b422f9587b8a83f012efe as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/dd8a8bb6a97b422f9587b8a83f012efe 2024-11-21T11:32:20,778 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/dd8a8bb6a97b422f9587b8a83f012efe, entries=22, sequenceid=36, filesize=28.0 K 2024-11-21T11:32:20,778 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.12 KB/23672, heapSize ~24.98 KB/25584, currentSize=3.15 KB/3228 for 1c5b168b1cd5495e81cd9c96a67dc062 in 20ms, sequenceid=36, compaction requested=false 2024-11-21T11:32:20,779 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1c5b168b1cd5495e81cd9c96a67dc062: 2024-11-21T11:32:20,779 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.2 K, sizeToCheck=16.0 K 2024-11-21T11:32:20,779 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:32:20,779 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/dd8a8bb6a97b422f9587b8a83f012efe because midkey is the same as first or last row 2024-11-21T11:32:21,090 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:21,090 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:22,090 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:22,090 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:32:22,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] regionserver.HRegion(8855): Flush requested on 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:22,772 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1c5b168b1cd5495e81cd9c96a67dc062 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-21T11:32:22,776 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/f0a5bc3db9f5411592468e5f6547c4cc is 1080, key is row0030/info:/1732188740759/Put/seqid=0 2024-11-21T11:32:22,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741839_1015 (size=12509) 2024-11-21T11:32:22,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741839_1015 (size=12509) 2024-11-21T11:32:22,781 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=46 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/f0a5bc3db9f5411592468e5f6547c4cc 2024-11-21T11:32:22,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/f0a5bc3db9f5411592468e5f6547c4cc as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/f0a5bc3db9f5411592468e5f6547c4cc 2024-11-21T11:32:22,792 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/f0a5bc3db9f5411592468e5f6547c4cc, entries=7, sequenceid=46, filesize=12.2 K 2024-11-21T11:32:22,793 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 1c5b168b1cd5495e81cd9c96a67dc062 in 21ms, sequenceid=46, compaction requested=true 2024-11-21T11:32:22,793 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1c5b168b1cd5495e81cd9c96a67dc062: 2024-11-21T11:32:22,793 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=52.4 K, sizeToCheck=16.0 K 2024-11-21T11:32:22,793 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:32:22,793 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/dd8a8bb6a97b422f9587b8a83f012efe because midkey is the same as first or last row 2024-11-21T11:32:22,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1c5b168b1cd5495e81cd9c96a67dc062:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-21T11:32:22,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T11:32:22,793 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T11:32:22,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] regionserver.HRegion(8855): Flush requested on 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:22,794 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1c5b168b1cd5495e81cd9c96a67dc062 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-21T11:32:22,795 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 53702 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T11:32:22,795 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1541): 1c5b168b1cd5495e81cd9c96a67dc062/info is initiating minor compaction (all files) 2024-11-21T11:32:22,795 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 1c5b168b1cd5495e81cd9c96a67dc062/info in TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062. 2024-11-21T11:32:22,795 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/7ddfa949054746708b3436b641050190, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/dd8a8bb6a97b422f9587b8a83f012efe, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/f0a5bc3db9f5411592468e5f6547c4cc] into tmpdir=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp, totalSize=52.4 K 2024-11-21T11:32:22,795 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7ddfa949054746708b3436b641050190, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732188740705 2024-11-21T11:32:22,796 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.Compactor(225): Compacting dd8a8bb6a97b422f9587b8a83f012efe, keycount=22, bloomtype=ROW, size=28.0 K, encoding=NONE, compression=NONE, seqNum=36, earliestPutTs=1732188740717 2024-11-21T11:32:22,796 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.Compactor(225): Compacting f0a5bc3db9f5411592468e5f6547c4cc, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=46, earliestPutTs=1732188740759 2024-11-21T11:32:22,798 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/1099878c9a424b69bba01b80cfd6ff40 is 1080, key is row0037/info:/1732188742773/Put/seqid=0 
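The sizes quoted in the compaction-selection records above are consistent: the three candidate HFiles were stored as 12509, 28684, and 12509 bytes, which sum to the "3 files of size 53702" reported by the exploring policy and to the totalSize=52.4 K in the compaction request (53702 / 1024 ≈ 52.4). A small, self-contained check of that arithmetic, purely illustrative:

public final class CompactionSizeCheck {
  public static void main(String[] args) {
    // Block sizes reported earlier for 7ddfa949..., dd8a8bb6..., and f0a5bc3d...
    long[] storeFileBytes = {12509L, 28684L, 12509L};
    long total = 0L;
    for (long b : storeFileBytes) {
      total += b;
    }
    // Prints: total=53702 bytes (52.4 K), matching the compaction records above.
    System.out.printf("total=%d bytes (%.1f K)%n", total, total / 1024.0);
  }
}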
2024-11-21T11:32:22,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741840_1016 (size=18987) 2024-11-21T11:32:22,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741840_1016 (size=18987) 2024-11-21T11:32:22,810 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=62 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/1099878c9a424b69bba01b80cfd6ff40 2024-11-21T11:32:22,813 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1c5b168b1cd5495e81cd9c96a67dc062#info#compaction#58 average throughput is 18.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T11:32:22,813 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/aefb0b04d8e740fbb1b5d0ccea49d730 is 1080, key is row0001/info:/1732188740705/Put/seqid=0 2024-11-21T11:32:22,818 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/1099878c9a424b69bba01b80cfd6ff40 as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/1099878c9a424b69bba01b80cfd6ff40 2024-11-21T11:32:22,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741841_1017 (size=43901) 2024-11-21T11:32:22,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741841_1017 (size=43901) 2024-11-21T11:32:22,825 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/1099878c9a424b69bba01b80cfd6ff40, entries=13, sequenceid=62, filesize=18.5 K 2024-11-21T11:32:22,827 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=15.76 KB/16140 for 1c5b168b1cd5495e81cd9c96a67dc062 in 33ms, sequenceid=62, compaction requested=false 2024-11-21T11:32:22,827 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1c5b168b1cd5495e81cd9c96a67dc062: 2024-11-21T11:32:22,827 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.0 K, sizeToCheck=16.0 K 2024-11-21T11:32:22,827 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:32:22,827 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/dd8a8bb6a97b422f9587b8a83f012efe because midkey is the same as first or last row 2024-11-21T11:32:22,828 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/aefb0b04d8e740fbb1b5d0ccea49d730 as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/aefb0b04d8e740fbb1b5d0ccea49d730 2024-11-21T11:32:22,834 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 1c5b168b1cd5495e81cd9c96a67dc062/info of 1c5b168b1cd5495e81cd9c96a67dc062 into aefb0b04d8e740fbb1b5d0ccea49d730(size=42.9 K), total size for store is 61.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T11:32:22,834 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1c5b168b1cd5495e81cd9c96a67dc062: 2024-11-21T11:32:22,834 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062., storeName=1c5b168b1cd5495e81cd9c96a67dc062/info, priority=13, startTime=1732188742793; duration=0sec 2024-11-21T11:32:22,834 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=61.4 K, sizeToCheck=16.0 K 2024-11-21T11:32:22,834 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:32:22,834 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/aefb0b04d8e740fbb1b5d0ccea49d730 because midkey is the same as first or last row 2024-11-21T11:32:22,834 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=61.4 K, sizeToCheck=16.0 K 2024-11-21T11:32:22,835 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:32:22,835 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/aefb0b04d8e740fbb1b5d0ccea49d730 because midkey is the same as first or last row 2024-11-21T11:32:22,835 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=61.4 K, sizeToCheck=16.0 K 2024-11-21T11:32:22,835 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:32:22,835 DEBUG 
[RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/aefb0b04d8e740fbb1b5d0ccea49d730 because midkey is the same as first or last row 2024-11-21T11:32:22,835 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T11:32:22,835 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1c5b168b1cd5495e81cd9c96a67dc062:info 2024-11-21T11:32:23,091 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:23,091 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:23,796 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T11:32:23,797 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:23,797 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:23,797 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:23,798 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:23,798 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:23,798 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:23,798 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:23,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:23,817 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:23,817 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:23,818 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:23,818 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:23,818 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:23,818 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:23,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:23,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:23,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:23,825 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:24,092 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:24,092 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:32:24,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] regionserver.HRegion(8855): Flush requested on 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:24,825 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1c5b168b1cd5495e81cd9c96a67dc062 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-21T11:32:24,830 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/ac8c64adacfd40b182cc3414b2a00188 is 1080, key is row0050/info:/1732188742795/Put/seqid=0 2024-11-21T11:32:24,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741842_1018 (size=22222) 2024-11-21T11:32:24,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741842_1018 (size=22222) 2024-11-21T11:32:24,836 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/ac8c64adacfd40b182cc3414b2a00188 2024-11-21T11:32:24,843 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/ac8c64adacfd40b182cc3414b2a00188 as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/ac8c64adacfd40b182cc3414b2a00188 2024-11-21T11:32:24,849 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/ac8c64adacfd40b182cc3414b2a00188, entries=16, sequenceid=82, filesize=21.7 K 2024-11-21T11:32:24,850 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=13.66 KB/13988 for 1c5b168b1cd5495e81cd9c96a67dc062 in 25ms, sequenceid=82, compaction requested=true 2024-11-21T11:32:24,850 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1c5b168b1cd5495e81cd9c96a67dc062: 2024-11-21T11:32:24,850 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-11-21T11:32:24,850 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:32:24,850 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/aefb0b04d8e740fbb1b5d0ccea49d730 because midkey is the same as first or last row 2024-11-21T11:32:24,850 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1c5b168b1cd5495e81cd9c96a67dc062:info, priority=-2147483648, current under 
compaction store size is 1 2024-11-21T11:32:24,850 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T11:32:24,850 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T11:32:24,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] regionserver.HRegion(8855): Flush requested on 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:24,851 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1c5b168b1cd5495e81cd9c96a67dc062 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-21T11:32:24,852 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85110 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T11:32:24,852 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1541): 1c5b168b1cd5495e81cd9c96a67dc062/info is initiating minor compaction (all files) 2024-11-21T11:32:24,852 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 1c5b168b1cd5495e81cd9c96a67dc062/info in TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062. 2024-11-21T11:32:24,852 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/aefb0b04d8e740fbb1b5d0ccea49d730, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/1099878c9a424b69bba01b80cfd6ff40, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/ac8c64adacfd40b182cc3414b2a00188] into tmpdir=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp, totalSize=83.1 K 2024-11-21T11:32:24,852 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.Compactor(225): Compacting aefb0b04d8e740fbb1b5d0ccea49d730, keycount=36, bloomtype=ROW, size=42.9 K, encoding=NONE, compression=NONE, seqNum=46, earliestPutTs=1732188740705 2024-11-21T11:32:24,853 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1099878c9a424b69bba01b80cfd6ff40, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=62, earliestPutTs=1732188742773 2024-11-21T11:32:24,853 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.Compactor(225): Compacting ac8c64adacfd40b182cc3414b2a00188, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732188742795 2024-11-21T11:32:24,856 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/1bbcdf02c19945aa9b0e2e1cf109e951 is 1080, key is 
row0066/info:/1732188744827/Put/seqid=0 2024-11-21T11:32:24,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741843_1019 (size=20064) 2024-11-21T11:32:24,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741843_1019 (size=20064) 2024-11-21T11:32:24,864 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/1bbcdf02c19945aa9b0e2e1cf109e951 2024-11-21T11:32:24,869 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1c5b168b1cd5495e81cd9c96a67dc062#info#compaction#61 average throughput is 22.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T11:32:24,869 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/5ce7938b7fc548e0abbf3d19a9298299 is 1080, key is row0001/info:/1732188740705/Put/seqid=0 2024-11-21T11:32:24,872 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/1bbcdf02c19945aa9b0e2e1cf109e951 as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/1bbcdf02c19945aa9b0e2e1cf109e951 2024-11-21T11:32:24,877 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/1bbcdf02c19945aa9b0e2e1cf109e951, entries=14, sequenceid=99, filesize=19.6 K 2024-11-21T11:32:24,878 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=13.66 KB/13988 for 1c5b168b1cd5495e81cd9c96a67dc062 in 27ms, sequenceid=99, compaction requested=false 2024-11-21T11:32:24,878 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1c5b168b1cd5495e81cd9c96a67dc062: 2024-11-21T11:32:24,878 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.7 K, sizeToCheck=16.0 K 2024-11-21T11:32:24,878 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:32:24,878 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/aefb0b04d8e740fbb1b5d0ccea49d730 because midkey is the same as first or last row 2024-11-21T11:32:24,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] regionserver.HRegion(8855): Flush requested on 
1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:24,879 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1c5b168b1cd5495e81cd9c96a67dc062 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-21T11:32:24,883 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/4fd2a2eda1e046b6aab020f5b21a170f is 1080, key is row0080/info:/1732188744852/Put/seqid=0 2024-11-21T11:32:24,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741844_1020 (size=75378) 2024-11-21T11:32:24,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741844_1020 (size=75378) 2024-11-21T11:32:24,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741845_1021 (size=20064) 2024-11-21T11:32:24,890 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/5ce7938b7fc548e0abbf3d19a9298299 as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/5ce7938b7fc548e0abbf3d19a9298299 2024-11-21T11:32:24,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741845_1021 (size=20064) 2024-11-21T11:32:24,891 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/4fd2a2eda1e046b6aab020f5b21a170f 2024-11-21T11:32:24,897 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 1c5b168b1cd5495e81cd9c96a67dc062/info of 1c5b168b1cd5495e81cd9c96a67dc062 into 5ce7938b7fc548e0abbf3d19a9298299(size=73.6 K), total size for store is 93.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
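The flush records above all follow the same two-step commit: DefaultStoreFlusher writes the new HFile under the region's .tmp directory, and HRegionFileSystem then moves it into the info store directory before HStore registers it and reports the entry count and file size. Below is a minimal, generic sketch of that write-to-temp-then-rename step, assuming a local filesystem and plain java.nio calls as stand-ins for HBase's HDFS-backed HRegionFileSystem API; the directory and file names are copied from the log purely for illustration.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Sketch of the ".tmp -> store directory" commit the flush records describe.
// Real HBase performs this against HDFS via HRegionFileSystem; this local-file
// version only illustrates the ordering: write the file fully, then rename it
// into place so readers never see a half-written HFile in the store directory.
public class FlushCommitSketch {
    static Path commitFlushedFile(Path regionDir, String family, Path tmpFile) throws IOException {
        Path storeDir = regionDir.resolve(family);
        Files.createDirectories(storeDir);
        Path committed = storeDir.resolve(tmpFile.getFileName());
        // The rename, not the write, is what publishes the flush to the store.
        return Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path regionDir = Files.createTempDirectory("1c5b168b1cd5495e81cd9c96a67dc062");
        Path tmpDir = Files.createDirectories(regionDir.resolve(".tmp").resolve("info"));
        Path flushed = Files.writeString(tmpDir.resolve("ac8c64adacfd40b182cc3414b2a00188"), "hfile bytes");
        System.out.println("committed to " + commitFlushedFile(regionDir, "info", flushed));
    }
}

The ordering is the point: the info directory only ever contains fully written files, which is why the log shows "Committing .tmp/info/... as .../info/..." strictly after "Flushed memstore data size=...".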
2024-11-21T11:32:24,897 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1c5b168b1cd5495e81cd9c96a67dc062: 2024-11-21T11:32:24,897 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062., storeName=1c5b168b1cd5495e81cd9c96a67dc062/info, priority=13, startTime=1732188744850; duration=0sec 2024-11-21T11:32:24,897 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/4fd2a2eda1e046b6aab020f5b21a170f as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/4fd2a2eda1e046b6aab020f5b21a170f 2024-11-21T11:32:24,897 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-11-21T11:32:24,897 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:32:24,897 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-11-21T11:32:24,897 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:32:24,897 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-11-21T11:32:24,897 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:32:24,898 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T11:32:24,898 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T11:32:24,898 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1c5b168b1cd5495e81cd9c96a67dc062:info 2024-11-21T11:32:24,899 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44595 {}] assignment.AssignmentManager(1363): Split request from 7b462513bfc2,44605,1732188729784, parent={ENCODED => 1c5b168b1cd5495e81cd9c96a67dc062, NAME => 'TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-21T11:32:24,901 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/4fd2a2eda1e046b6aab020f5b21a170f, entries=14, sequenceid=116, filesize=19.6 K 2024-11-21T11:32:24,902 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=3.15 KB/3228 for 1c5b168b1cd5495e81cd9c96a67dc062 in 23ms, sequenceid=116, compaction requested=true 2024-11-21T11:32:24,902 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1c5b168b1cd5495e81cd9c96a67dc062: 2024-11-21T11:32:24,902 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.8 K, sizeToCheck=16.0 K 2024-11-21T11:32:24,902 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:32:24,902 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.8 K, sizeToCheck=16.0 K 2024-11-21T11:32:24,902 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:32:24,902 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.8 K, sizeToCheck=16.0 K 2024-11-21T11:32:24,902 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-21T11:32:24,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=1 2024-11-21T11:32:24,906 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44595 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=7b462513bfc2,44605,1732188729784 2024-11-21T11:32:24,911 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44595 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1c5b168b1cd5495e81cd9c96a67dc062, daughterA=077e0264a169ebfd5457c142719e92ea, daughterB=e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:24,912 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1c5b168b1cd5495e81cd9c96a67dc062, daughterA=077e0264a169ebfd5457c142719e92ea, daughterB=e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:24,912 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1c5b168b1cd5495e81cd9c96a67dc062, daughterA=077e0264a169ebfd5457c142719e92ea, daughterB=e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:24,912 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1c5b168b1cd5495e81cd9c96a67dc062, daughterA=077e0264a169ebfd5457c142719e92ea, daughterB=e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:24,913 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44595 {}] assignment.AssignmentManager(1363): Split request from 7b462513bfc2,44605,1732188729784, parent={ENCODED => 1c5b168b1cd5495e81cd9c96a67dc062, NAME => 
'TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-21T11:32:24,914 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44595 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=SPLITTING, location=7b462513bfc2,44605,1732188729784 2024-11-21T11:32:24,915 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44595 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1c5b168b1cd5495e81cd9c96a67dc062, daughterA=8b3f070e740c7e2423006666b6f38577, daughterB=2809c77fd4f2166f5c0cd31d9366fbd2 2024-11-21T11:32:24,916 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(842): Waiting on xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1c5b168b1cd5495e81cd9c96a67dc062, daughterA=8b3f070e740c7e2423006666b6f38577, daughterB=2809c77fd4f2166f5c0cd31d9366fbd2 held by pid=7 2024-11-21T11:32:24,921 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1c5b168b1cd5495e81cd9c96a67dc062, UNASSIGN}] 2024-11-21T11:32:24,925 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(162): LOCK_EVENT_WAIT SchemaLocking[serverLocks={},namespaceLocks={hbase=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},tableLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},regionLocks={},peerLocks={},metaLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},globalLocks={}] 2024-11-21T11:32:24,925 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1c5b168b1cd5495e81cd9c96a67dc062, UNASSIGN 2024-11-21T11:32:24,925 DEBUG [PEWorker-3 {}] procedure2.ProcedureExecutor(1511): LOCK_EVENT_WAIT pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1c5b168b1cd5495e81cd9c96a67dc062, daughterA=8b3f070e740c7e2423006666b6f38577, daughterB=2809c77fd4f2166f5c0cd31d9366fbd2 2024-11-21T11:32:24,926 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=1c5b168b1cd5495e81cd9c96a67dc062, regionState=CLOSING, regionLocation=7b462513bfc2,44605,1732188729784 2024-11-21T11:32:24,929 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1c5b168b1cd5495e81cd9c96a67dc062, UNASSIGN because future has completed 2024-11-21T11:32:24,929 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-21T11:32:24,930 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1c5b168b1cd5495e81cd9c96a67dc062, server=7b462513bfc2,44605,1732188729784}] 2024-11-21T11:32:25,087 INFO 
[RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(122): Close 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:25,087 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-21T11:32:25,088 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1722): Closing 1c5b168b1cd5495e81cd9c96a67dc062, disabling compactions & flushes 2024-11-21T11:32:25,088 INFO [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062. 2024-11-21T11:32:25,088 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062. 2024-11-21T11:32:25,088 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062. after waiting 0 ms 2024-11-21T11:32:25,088 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062. 2024-11-21T11:32:25,088 INFO [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(2902): Flushing 1c5b168b1cd5495e81cd9c96a67dc062 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-21T11:32:25,092 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/d8ff1c4c7f1b46d0861ade7da6b1e4f7 is 1080, key is row0094/info:/1732188744880/Put/seqid=0 2024-11-21T11:32:25,092 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:25,092 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:32:25,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741846_1022 (size=8193) 2024-11-21T11:32:25,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741846_1022 (size=8193) 2024-11-21T11:32:25,098 INFO [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/d8ff1c4c7f1b46d0861ade7da6b1e4f7 2024-11-21T11:32:25,103 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/.tmp/info/d8ff1c4c7f1b46d0861ade7da6b1e4f7 as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/d8ff1c4c7f1b46d0861ade7da6b1e4f7 2024-11-21T11:32:25,109 INFO [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/d8ff1c4c7f1b46d0861ade7da6b1e4f7, entries=3, sequenceid=123, filesize=8.0 K 2024-11-21T11:32:25,110 INFO [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1c5b168b1cd5495e81cd9c96a67dc062 in 22ms, sequenceid=123, compaction requested=true 2024-11-21T11:32:25,111 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/7ddfa949054746708b3436b641050190, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/dd8a8bb6a97b422f9587b8a83f012efe, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/aefb0b04d8e740fbb1b5d0ccea49d730, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/f0a5bc3db9f5411592468e5f6547c4cc, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/1099878c9a424b69bba01b80cfd6ff40, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/ac8c64adacfd40b182cc3414b2a00188] to archive 2024-11-21T11:32:25,112 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-21T11:32:25,113 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/7ddfa949054746708b3436b641050190 to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/7ddfa949054746708b3436b641050190 2024-11-21T11:32:25,115 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/dd8a8bb6a97b422f9587b8a83f012efe to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/dd8a8bb6a97b422f9587b8a83f012efe 2024-11-21T11:32:25,116 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/aefb0b04d8e740fbb1b5d0ccea49d730 to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/aefb0b04d8e740fbb1b5d0ccea49d730 2024-11-21T11:32:25,117 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/f0a5bc3db9f5411592468e5f6547c4cc to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/f0a5bc3db9f5411592468e5f6547c4cc 2024-11-21T11:32:25,118 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/1099878c9a424b69bba01b80cfd6ff40 to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/1099878c9a424b69bba01b80cfd6ff40 2024-11-21T11:32:25,119 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/ac8c64adacfd40b182cc3414b2a00188 to 
hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/ac8c64adacfd40b182cc3414b2a00188 2024-11-21T11:32:25,125 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=1 2024-11-21T11:32:25,125 INFO [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062. 2024-11-21T11:32:25,125 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1676): Region close journal for 1c5b168b1cd5495e81cd9c96a67dc062: Waiting for close lock at 1732188745088Running coprocessor pre-close hooks at 1732188745088Disabling compacts and flushes for region at 1732188745088Disabling writes for close at 1732188745088Obtaining lock to block concurrent updates at 1732188745088Preparing flush snapshotting stores in 1c5b168b1cd5495e81cd9c96a67dc062 at 1732188745088Finished memstore snapshotting TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732188745088Flushing stores of TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062. at 1732188745089 (+1 ms)Flushing 1c5b168b1cd5495e81cd9c96a67dc062/info: creating writer at 1732188745089Flushing 1c5b168b1cd5495e81cd9c96a67dc062/info: appending metadata at 1732188745092 (+3 ms)Flushing 1c5b168b1cd5495e81cd9c96a67dc062/info: closing flushed file at 1732188745092Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3328e204: reopening flushed file at 1732188745103 (+11 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1c5b168b1cd5495e81cd9c96a67dc062 in 22ms, sequenceid=123, compaction requested=true at 1732188745110 (+7 ms)Writing region close event to WAL at 1732188745122 (+12 ms)Running coprocessor post-close hooks at 1732188745125 (+3 ms)Closed at 1732188745125 2024-11-21T11:32:25,128 INFO [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(157): Closed 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:25,128 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=1c5b168b1cd5495e81cd9c96a67dc062, regionState=CLOSED 2024-11-21T11:32:25,130 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1c5b168b1cd5495e81cd9c96a67dc062, server=7b462513bfc2,44605,1732188729784 because future has completed 2024-11-21T11:32:25,133 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-21T11:32:25,133 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; CloseRegionProcedure 1c5b168b1cd5495e81cd9c96a67dc062, server=7b462513bfc2,44605,1732188729784 in 201 msec 2024-11-21T11:32:25,136 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-11-21T11:32:25,136 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1c5b168b1cd5495e81cd9c96a67dc062, UNASSIGN in 212 msec 2024-11-21T11:32:25,144 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:25,147 INFO [PEWorker-5 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=1c5b168b1cd5495e81cd9c96a67dc062, threads=4 2024-11-21T11:32:25,150 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/5ce7938b7fc548e0abbf3d19a9298299 for region: 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:25,150 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/4fd2a2eda1e046b6aab020f5b21a170f for region: 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:25,150 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/d8ff1c4c7f1b46d0861ade7da6b1e4f7 for region: 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:25,150 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/1bbcdf02c19945aa9b0e2e1cf109e951 for region: 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:25,168 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/1bbcdf02c19945aa9b0e2e1cf109e951, top=true 2024-11-21T11:32:25,169 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/4fd2a2eda1e046b6aab020f5b21a170f, top=true 2024-11-21T11:32:25,169 DEBUG [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/d8ff1c4c7f1b46d0861ade7da6b1e4f7, top=true 2024-11-21T11:32:25,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741847_1023 (size=27) 2024-11-21T11:32:25,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:37673 is added to blk_1073741847_1023 (size=27) 2024-11-21T11:32:25,179 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/TestLogRolling-testLogRolling=1c5b168b1cd5495e81cd9c96a67dc062-4fd2a2eda1e046b6aab020f5b21a170f for child: e186dc33dd4cd397c5a22e8f3fdc3eae, parent: 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:25,179 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/4fd2a2eda1e046b6aab020f5b21a170f for region: 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:25,180 INFO [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/TestLogRolling-testLogRolling=1c5b168b1cd5495e81cd9c96a67dc062-d8ff1c4c7f1b46d0861ade7da6b1e4f7 for child: e186dc33dd4cd397c5a22e8f3fdc3eae, parent: 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:25,180 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/TestLogRolling-testLogRolling=1c5b168b1cd5495e81cd9c96a67dc062-1bbcdf02c19945aa9b0e2e1cf109e951 for child: e186dc33dd4cd397c5a22e8f3fdc3eae, parent: 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:25,180 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/d8ff1c4c7f1b46d0861ade7da6b1e4f7 for region: 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:25,180 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/1bbcdf02c19945aa9b0e2e1cf109e951 for region: 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:25,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741848_1024 (size=27) 2024-11-21T11:32:25,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741848_1024 (size=27) 2024-11-21T11:32:25,185 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/5ce7938b7fc548e0abbf3d19a9298299 for region: 1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:25,187 DEBUG [PEWorker-5 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 1c5b168b1cd5495e81cd9c96a67dc062 Daughter A: 
[hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/077e0264a169ebfd5457c142719e92ea/info/5ce7938b7fc548e0abbf3d19a9298299.1c5b168b1cd5495e81cd9c96a67dc062] storefiles, Daughter B: [hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/TestLogRolling-testLogRolling=1c5b168b1cd5495e81cd9c96a67dc062-1bbcdf02c19945aa9b0e2e1cf109e951, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/TestLogRolling-testLogRolling=1c5b168b1cd5495e81cd9c96a67dc062-4fd2a2eda1e046b6aab020f5b21a170f, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/5ce7938b7fc548e0abbf3d19a9298299.1c5b168b1cd5495e81cd9c96a67dc062, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/TestLogRolling-testLogRolling=1c5b168b1cd5495e81cd9c96a67dc062-d8ff1c4c7f1b46d0861ade7da6b1e4f7] storefiles. 2024-11-21T11:32:25,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741849_1025 (size=71) 2024-11-21T11:32:25,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741849_1025 (size=71) 2024-11-21T11:32:25,197 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:25,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741850_1026 (size=71) 2024-11-21T11:32:25,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741850_1026 (size=71) 2024-11-21T11:32:25,210 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:25,222 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/077e0264a169ebfd5457c142719e92ea/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-21T11:32:25,225 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-21T11:32:25,227 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732188745227"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1732188745227"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1732188745227"}]},"ts":"1732188745227"} 2024-11-21T11:32:25,227 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732188744906.077e0264a169ebfd5457c142719e92ea.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732188745227"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732188745227"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732188745227"}]},"ts":"1732188745227"} 2024-11-21T11:32:25,227 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732188745227"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732188745227"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732188745227"}]},"ts":"1732188745227"} 2024-11-21T11:32:25,245 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=077e0264a169ebfd5457c142719e92ea, ASSIGN}, {pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e186dc33dd4cd397c5a22e8f3fdc3eae, ASSIGN}] 2024-11-21T11:32:25,247 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=077e0264a169ebfd5457c142719e92ea, ASSIGN 2024-11-21T11:32:25,247 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e186dc33dd4cd397c5a22e8f3fdc3eae, ASSIGN 2024-11-21T11:32:25,247 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=077e0264a169ebfd5457c142719e92ea, ASSIGN; state=SPLITTING_NEW, location=7b462513bfc2,44605,1732188729784; forceNewPlan=false, retain=false 2024-11-21T11:32:25,248 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e186dc33dd4cd397c5a22e8f3fdc3eae, ASSIGN; state=SPLITTING_NEW, location=7b462513bfc2,44605,1732188729784; forceNewPlan=false, retain=false 2024-11-21T11:32:25,398 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=e186dc33dd4cd397c5a22e8f3fdc3eae, regionState=OPENING, regionLocation=7b462513bfc2,44605,1732188729784 2024-11-21T11:32:25,398 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=077e0264a169ebfd5457c142719e92ea, regionState=OPENING, regionLocation=7b462513bfc2,44605,1732188729784 2024-11-21T11:32:25,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e186dc33dd4cd397c5a22e8f3fdc3eae, ASSIGN because future has 
completed 2024-11-21T11:32:25,401 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure e186dc33dd4cd397c5a22e8f3fdc3eae, server=7b462513bfc2,44605,1732188729784}] 2024-11-21T11:32:25,401 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=077e0264a169ebfd5457c142719e92ea, ASSIGN because future has completed 2024-11-21T11:32:25,402 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 077e0264a169ebfd5457c142719e92ea, server=7b462513bfc2,44605,1732188729784}] 2024-11-21T11:32:25,557 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732188744906.077e0264a169ebfd5457c142719e92ea. 2024-11-21T11:32:25,557 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7752): Opening region: {ENCODED => 077e0264a169ebfd5457c142719e92ea, NAME => 'TestLogRolling-testLogRolling,,1732188744906.077e0264a169ebfd5457c142719e92ea.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-21T11:32:25,558 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 077e0264a169ebfd5457c142719e92ea 2024-11-21T11:32:25,558 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732188744906.077e0264a169ebfd5457c142719e92ea.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:32:25,558 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7794): checking encryption for 077e0264a169ebfd5457c142719e92ea 2024-11-21T11:32:25,558 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7797): checking classloading for 077e0264a169ebfd5457c142719e92ea 2024-11-21T11:32:25,559 INFO [StoreOpener-077e0264a169ebfd5457c142719e92ea-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 077e0264a169ebfd5457c142719e92ea 2024-11-21T11:32:25,560 INFO [StoreOpener-077e0264a169ebfd5457c142719e92ea-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 077e0264a169ebfd5457c142719e92ea columnFamilyName info 2024-11-21T11:32:25,560 DEBUG [StoreOpener-077e0264a169ebfd5457c142719e92ea-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:25,571 DEBUG [StoreOpener-077e0264a169ebfd5457c142719e92ea-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/077e0264a169ebfd5457c142719e92ea/info/5ce7938b7fc548e0abbf3d19a9298299.1c5b168b1cd5495e81cd9c96a67dc062->hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/5ce7938b7fc548e0abbf3d19a9298299-bottom 2024-11-21T11:32:25,572 INFO [StoreOpener-077e0264a169ebfd5457c142719e92ea-1 {}] regionserver.HStore(327): Store=077e0264a169ebfd5457c142719e92ea/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:32:25,572 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1038): replaying wal for 077e0264a169ebfd5457c142719e92ea 2024-11-21T11:32:25,573 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/077e0264a169ebfd5457c142719e92ea 2024-11-21T11:32:25,574 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/077e0264a169ebfd5457c142719e92ea 2024-11-21T11:32:25,575 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1048): stopping wal replay for 077e0264a169ebfd5457c142719e92ea 2024-11-21T11:32:25,575 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1060): Cleaning up temporary data for 077e0264a169ebfd5457c142719e92ea 2024-11-21T11:32:25,577 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1093): writing seq id for 077e0264a169ebfd5457c142719e92ea 2024-11-21T11:32:25,578 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1114): Opened 077e0264a169ebfd5457c142719e92ea; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=851584, jitterRate=0.08284510672092438}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T11:32:25,578 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 077e0264a169ebfd5457c142719e92ea 2024-11-21T11:32:25,579 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1006): Region open journal for 
077e0264a169ebfd5457c142719e92ea: Running coprocessor pre-open hook at 1732188745558Writing region info on filesystem at 1732188745558Initializing all the Stores at 1732188745559 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188745559Cleaning up temporary data from old regions at 1732188745575 (+16 ms)Running coprocessor post-open hooks at 1732188745578 (+3 ms)Region opened successfully at 1732188745579 (+1 ms) 2024-11-21T11:32:25,580 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732188744906.077e0264a169ebfd5457c142719e92ea., pid=14, masterSystemTime=1732188745553 2024-11-21T11:32:25,580 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(403): Add compact mark for store 077e0264a169ebfd5457c142719e92ea:info, priority=-2147483648, current under compaction store size is 1 2024-11-21T11:32:25,580 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T11:32:25,580 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-21T11:32:25,581 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1732188744906.077e0264a169ebfd5457c142719e92ea. 2024-11-21T11:32:25,581 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1541): 077e0264a169ebfd5457c142719e92ea/info is initiating minor compaction (all files) 2024-11-21T11:32:25,581 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 077e0264a169ebfd5457c142719e92ea/info in TestLogRolling-testLogRolling,,1732188744906.077e0264a169ebfd5457c142719e92ea. 
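The repeated "Should split ... sizeToCheck=16.0 K" checks earlier in this sequence can be reproduced from the numbers in the region-open record above. IncreasingToUpperBoundRegionSplitPolicy is commonly described as comparing the store size against initialSize multiplied by the cube of the number of regions of the table on this server, capped at desiredMaxFileSize; with initialSize=16384, desiredMaxFileSize=851584 and regionsWithCommonTable=1 that gives 16 KB, which the ~83-113 KB store comfortably exceeds. The sketch below redoes that arithmetic; it is a stand-in written for this log, not HBase's SplitPolicy classes, and the storeSize constant is simply the ~112.8 K sumSize reported before the split was queued.

// Illustrative recomputation of the split-size check logged above.
// The constants come from the "Opened 077e0264..." record; the formula is the
// commonly documented behaviour of IncreasingToUpperBoundRegionSplitPolicy,
// simplified here and not taken from HBase source.
public class SplitCheckSketch {
    static long sizeToCheck(long initialSize, long desiredMaxFileSize, int regionsWithCommonTable) {
        if (regionsWithCommonTable == 0) {
            return desiredMaxFileSize;
        }
        long cubed = initialSize * regionsWithCommonTable * regionsWithCommonTable * regionsWithCommonTable;
        return Math.min(desiredMaxFileSize, cubed);
    }

    public static void main(String[] args) {
        long initialSize = 16_384L;    // from the log: initialSize=16384
        long desiredMax = 851_584L;    // from the log: desiredMaxFileSize=851584
        int regions = 1;               // from the log: regionsWithCommonTable=1
        long threshold = sizeToCheck(initialSize, desiredMax, regions);
        long storeSize = 115_507L;     // roughly the 112.8 K sumSize reported before the split
        System.out.printf("sizeToCheck=%d bytes, storeSize=%d bytes, shouldSplit=%b%n",
            threshold, storeSize, storeSize > threshold);
    }
}

Earlier flushes already satisfied this check, but StoreUtils declined to split while the midkey of the largest file (aefb0b04...) equalled its first or last row; only after the 83.1 K compaction produced 5ce7938b... did CompactSplit queue the split, which the master then executed at splitKey=row0062.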
2024-11-21T11:32:25,581 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/077e0264a169ebfd5457c142719e92ea/info/5ce7938b7fc548e0abbf3d19a9298299.1c5b168b1cd5495e81cd9c96a67dc062->hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/5ce7938b7fc548e0abbf3d19a9298299-bottom] into tmpdir=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/077e0264a169ebfd5457c142719e92ea/.tmp, totalSize=73.6 K 2024-11-21T11:32:25,582 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5ce7938b7fc548e0abbf3d19a9298299.1c5b168b1cd5495e81cd9c96a67dc062, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732188740705 2024-11-21T11:32:25,583 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732188744906.077e0264a169ebfd5457c142719e92ea. 2024-11-21T11:32:25,583 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732188744906.077e0264a169ebfd5457c142719e92ea. 2024-11-21T11:32:25,583 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae. 
2024-11-21T11:32:25,583 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => e186dc33dd4cd397c5a22e8f3fdc3eae, NAME => 'TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-21T11:32:25,583 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:25,583 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:32:25,583 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=077e0264a169ebfd5457c142719e92ea, regionState=OPEN, openSeqNum=127, regionLocation=7b462513bfc2,44605,1732188729784 2024-11-21T11:32:25,584 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:25,584 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:25,585 INFO [StoreOpener-e186dc33dd4cd397c5a22e8f3fdc3eae-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:25,585 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44605 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-21T11:32:25,585 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-11-21T11:32:25,586 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-21T11:32:25,586 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=14, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 077e0264a169ebfd5457c142719e92ea, server=7b462513bfc2,44605,1732188729784 because future has completed 2024-11-21T11:32:25,586 INFO [StoreOpener-e186dc33dd4cd397c5a22e8f3fdc3eae-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e186dc33dd4cd397c5a22e8f3fdc3eae columnFamilyName info 2024-11-21T11:32:25,586 DEBUG [StoreOpener-e186dc33dd4cd397c5a22e8f3fdc3eae-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:25,590 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=11 2024-11-21T11:32:25,590 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 077e0264a169ebfd5457c142719e92ea, server=7b462513bfc2,44605,1732188729784 in 185 msec 2024-11-21T11:32:25,592 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=077e0264a169ebfd5457c142719e92ea, ASSIGN in 345 msec 2024-11-21T11:32:25,603 DEBUG [StoreOpener-e186dc33dd4cd397c5a22e8f3fdc3eae-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/5ce7938b7fc548e0abbf3d19a9298299.1c5b168b1cd5495e81cd9c96a67dc062->hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/5ce7938b7fc548e0abbf3d19a9298299-top 2024-11-21T11:32:25,609 DEBUG [StoreOpener-e186dc33dd4cd397c5a22e8f3fdc3eae-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/TestLogRolling-testLogRolling=1c5b168b1cd5495e81cd9c96a67dc062-1bbcdf02c19945aa9b0e2e1cf109e951 2024-11-21T11:32:25,609 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 077e0264a169ebfd5457c142719e92ea#info#compaction#64 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T11:32:25,610 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/077e0264a169ebfd5457c142719e92ea/.tmp/info/9964bfae5e3341afbc27aad907377380 is 1080, key is row0001/info:/1732188740705/Put/seqid=0 2024-11-21T11:32:25,612 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/1588230740/.tmp/info/ac547f948ab241cdbe43300f292af631 is 193, key is TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae./info:regioninfo/1732188745398/Put/seqid=0 2024-11-21T11:32:25,614 DEBUG [StoreOpener-e186dc33dd4cd397c5a22e8f3fdc3eae-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/TestLogRolling-testLogRolling=1c5b168b1cd5495e81cd9c96a67dc062-4fd2a2eda1e046b6aab020f5b21a170f 2024-11-21T11:32:25,619 DEBUG [StoreOpener-e186dc33dd4cd397c5a22e8f3fdc3eae-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/TestLogRolling-testLogRolling=1c5b168b1cd5495e81cd9c96a67dc062-d8ff1c4c7f1b46d0861ade7da6b1e4f7 2024-11-21T11:32:25,620 INFO [StoreOpener-e186dc33dd4cd397c5a22e8f3fdc3eae-1 {}] regionserver.HStore(327): Store=e186dc33dd4cd397c5a22e8f3fdc3eae/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:32:25,620 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:25,621 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:25,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741851_1027 (size=70862) 2024-11-21T11:32:25,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741851_1027 (size=70862) 2024-11-21T11:32:25,622 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:25,623 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:25,623 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for e186dc33dd4cd397c5a22e8f3fdc3eae 
2024-11-21T11:32:25,625 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:25,627 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened e186dc33dd4cd397c5a22e8f3fdc3eae; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=698601, jitterRate=-0.11168351769447327}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T11:32:25,627 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:25,627 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for e186dc33dd4cd397c5a22e8f3fdc3eae: Running coprocessor pre-open hook at 1732188745584Writing region info on filesystem at 1732188745584Initializing all the Stores at 1732188745585 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188745585Cleaning up temporary data from old regions at 1732188745623 (+38 ms)Running coprocessor post-open hooks at 1732188745627 (+4 ms)Region opened successfully at 1732188745627 2024-11-21T11:32:25,628 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae., pid=13, masterSystemTime=1732188745553 2024-11-21T11:32:25,628 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store e186dc33dd4cd397c5a22e8f3fdc3eae:info, priority=-2147483648, current under compaction store size is 2 2024-11-21T11:32:25,628 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-21T11:32:25,629 DEBUG [RS:0;7b462513bfc2:44605-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-21T11:32:25,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741852_1028 (size=9847) 2024-11-21T11:32:25,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741852_1028 (size=9847) 2024-11-21T11:32:25,630 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/077e0264a169ebfd5457c142719e92ea/.tmp/info/9964bfae5e3341afbc27aad907377380 as 
hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/077e0264a169ebfd5457c142719e92ea/info/9964bfae5e3341afbc27aad907377380 2024-11-21T11:32:25,631 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/1588230740/.tmp/info/ac547f948ab241cdbe43300f292af631 2024-11-21T11:32:25,631 DEBUG [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae. 2024-11-21T11:32:25,631 INFO [RS_OPEN_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae. 2024-11-21T11:32:25,632 INFO [RS:0;7b462513bfc2:44605-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae. 2024-11-21T11:32:25,632 DEBUG [RS:0;7b462513bfc2:44605-longCompactions-0 {}] regionserver.HStore(1541): e186dc33dd4cd397c5a22e8f3fdc3eae/info is initiating minor compaction (all files) 2024-11-21T11:32:25,632 INFO [RS:0;7b462513bfc2:44605-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e186dc33dd4cd397c5a22e8f3fdc3eae/info in TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae. 
2024-11-21T11:32:25,632 INFO [RS:0;7b462513bfc2:44605-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/5ce7938b7fc548e0abbf3d19a9298299.1c5b168b1cd5495e81cd9c96a67dc062->hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/5ce7938b7fc548e0abbf3d19a9298299-top, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/TestLogRolling-testLogRolling=1c5b168b1cd5495e81cd9c96a67dc062-1bbcdf02c19945aa9b0e2e1cf109e951, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/TestLogRolling-testLogRolling=1c5b168b1cd5495e81cd9c96a67dc062-4fd2a2eda1e046b6aab020f5b21a170f, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/TestLogRolling-testLogRolling=1c5b168b1cd5495e81cd9c96a67dc062-d8ff1c4c7f1b46d0861ade7da6b1e4f7] into tmpdir=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp, totalSize=120.8 K 2024-11-21T11:32:25,633 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=e186dc33dd4cd397c5a22e8f3fdc3eae, regionState=OPEN, openSeqNum=127, regionLocation=7b462513bfc2,44605,1732188729784 2024-11-21T11:32:25,633 DEBUG [RS:0;7b462513bfc2:44605-longCompactions-0 {}] compactions.Compactor(225): Compacting 5ce7938b7fc548e0abbf3d19a9298299.1c5b168b1cd5495e81cd9c96a67dc062, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1732188740705 2024-11-21T11:32:25,634 DEBUG [RS:0;7b462513bfc2:44605-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=1c5b168b1cd5495e81cd9c96a67dc062-1bbcdf02c19945aa9b0e2e1cf109e951, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1732188744827 2024-11-21T11:32:25,635 DEBUG [RS:0;7b462513bfc2:44605-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=1c5b168b1cd5495e81cd9c96a67dc062-4fd2a2eda1e046b6aab020f5b21a170f, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732188744852 2024-11-21T11:32:25,635 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure e186dc33dd4cd397c5a22e8f3fdc3eae, server=7b462513bfc2,44605,1732188729784 because future has completed 2024-11-21T11:32:25,636 DEBUG [RS:0;7b462513bfc2:44605-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=1c5b168b1cd5495e81cd9c96a67dc062-d8ff1c4c7f1b46d0861ade7da6b1e4f7, keycount=3, bloomtype=ROW, size=8.0 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732188744880 2024-11-21T11:32:25,642 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=12 2024-11-21T11:32:25,642 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; OpenRegionProcedure e186dc33dd4cd397c5a22e8f3fdc3eae, server=7b462513bfc2,44605,1732188729784 in 237 msec 2024-11-21T11:32:25,643 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 077e0264a169ebfd5457c142719e92ea/info of 077e0264a169ebfd5457c142719e92ea into 9964bfae5e3341afbc27aad907377380(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T11:32:25,643 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 077e0264a169ebfd5457c142719e92ea: 2024-11-21T11:32:25,643 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732188744906.077e0264a169ebfd5457c142719e92ea., storeName=077e0264a169ebfd5457c142719e92ea/info, priority=15, startTime=1732188745580; duration=0sec 2024-11-21T11:32:25,643 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T11:32:25,643 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 077e0264a169ebfd5457c142719e92ea:info 2024-11-21T11:32:25,645 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=7 2024-11-21T11:32:25,645 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e186dc33dd4cd397c5a22e8f3fdc3eae, ASSIGN in 397 msec 2024-11-21T11:32:25,648 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1c5b168b1cd5495e81cd9c96a67dc062, daughterA=077e0264a169ebfd5457c142719e92ea, daughterB=e186dc33dd4cd397c5a22e8f3fdc3eae in 739 msec 2024-11-21T11:32:25,648 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1c5b168b1cd5495e81cd9c96a67dc062, daughterA=8b3f070e740c7e2423006666b6f38577, daughterB=2809c77fd4f2166f5c0cd31d9366fbd2 2024-11-21T11:32:25,648 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1c5b168b1cd5495e81cd9c96a67dc062, daughterA=8b3f070e740c7e2423006666b6f38577, daughterB=2809c77fd4f2166f5c0cd31d9366fbd2 2024-11-21T11:32:25,648 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1c5b168b1cd5495e81cd9c96a67dc062, daughterA=8b3f070e740c7e2423006666b6f38577, daughterB=2809c77fd4f2166f5c0cd31d9366fbd2 2024-11-21T11:32:25,649 INFO [PEWorker-5 {}] assignment.SplitTableRegionProcedure(534): Split of {ENCODED => 1c5b168b1cd5495e81cd9c96a67dc062, NAME => 'TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062.', STARTKEY => '', ENDKEY => ''} skipped; 
state is already SPLIT 2024-11-21T11:32:25,651 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=1c5b168b1cd5495e81cd9c96a67dc062, daughterA=8b3f070e740c7e2423006666b6f38577, daughterB=2809c77fd4f2166f5c0cd31d9366fbd2 in 734 msec 2024-11-21T11:32:25,663 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/1588230740/.tmp/ns/0f3e09a7ebfa4ef6bd3dee98a7cc54c3 is 43, key is default/ns:d/1732188730567/Put/seqid=0 2024-11-21T11:32:25,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741853_1029 (size=5153) 2024-11-21T11:32:25,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741853_1029 (size=5153) 2024-11-21T11:32:25,672 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/1588230740/.tmp/ns/0f3e09a7ebfa4ef6bd3dee98a7cc54c3 2024-11-21T11:32:25,683 INFO [RS:0;7b462513bfc2:44605-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e186dc33dd4cd397c5a22e8f3fdc3eae#info#compaction#67 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T11:32:25,684 DEBUG [RS:0;7b462513bfc2:44605-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/397fc281473c475099657aad269e4639 is 1080, key is row0062/info:/1732188742817/Put/seqid=0 2024-11-21T11:32:25,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741854_1030 (size=43081) 2024-11-21T11:32:25,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741854_1030 (size=43081) 2024-11-21T11:32:25,691 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/1588230740/.tmp/table/e67997f839744362bead35e204b7a387 is 65, key is TestLogRolling-testLogRolling/table:state/1732188730975/Put/seqid=0 2024-11-21T11:32:25,695 DEBUG [RS:0;7b462513bfc2:44605-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/397fc281473c475099657aad269e4639 as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/397fc281473c475099657aad269e4639 2024-11-21T11:32:25,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741855_1031 (size=5340) 2024-11-21T11:32:25,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:34627 is added to blk_1073741855_1031 (size=5340) 2024-11-21T11:32:25,697 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/1588230740/.tmp/table/e67997f839744362bead35e204b7a387 2024-11-21T11:32:25,702 INFO [RS:0;7b462513bfc2:44605-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in e186dc33dd4cd397c5a22e8f3fdc3eae/info of e186dc33dd4cd397c5a22e8f3fdc3eae into 397fc281473c475099657aad269e4639(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T11:32:25,702 DEBUG [RS:0;7b462513bfc2:44605-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e186dc33dd4cd397c5a22e8f3fdc3eae: 2024-11-21T11:32:25,702 INFO [RS:0;7b462513bfc2:44605-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae., storeName=e186dc33dd4cd397c5a22e8f3fdc3eae/info, priority=12, startTime=1732188745628; duration=0sec 2024-11-21T11:32:25,702 DEBUG [RS:0;7b462513bfc2:44605-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T11:32:25,702 DEBUG [RS:0;7b462513bfc2:44605-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e186dc33dd4cd397c5a22e8f3fdc3eae:info 2024-11-21T11:32:25,705 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/1588230740/.tmp/info/ac547f948ab241cdbe43300f292af631 as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/1588230740/info/ac547f948ab241cdbe43300f292af631 2024-11-21T11:32:25,710 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/1588230740/info/ac547f948ab241cdbe43300f292af631, entries=30, sequenceid=17, filesize=9.6 K 2024-11-21T11:32:25,711 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/1588230740/.tmp/ns/0f3e09a7ebfa4ef6bd3dee98a7cc54c3 as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/1588230740/ns/0f3e09a7ebfa4ef6bd3dee98a7cc54c3 2024-11-21T11:32:25,716 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/1588230740/ns/0f3e09a7ebfa4ef6bd3dee98a7cc54c3, entries=2, sequenceid=17, filesize=5.0 K 2024-11-21T11:32:25,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/1588230740/.tmp/table/e67997f839744362bead35e204b7a387 as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/1588230740/table/e67997f839744362bead35e204b7a387 2024-11-21T11:32:25,722 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/1588230740/table/e67997f839744362bead35e204b7a387, entries=2, sequenceid=17, filesize=5.2 K 2024-11-21T11:32:25,723 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 138ms, sequenceid=17, compaction requested=false 2024-11-21T11:32:25,724 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-21T11:32:26,093 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:26,093 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:26,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:36334 deadline: 1732188756885, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062. is not online on 7b462513bfc2,44605,1732188729784 2024-11-21T11:32:26,911 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062., hostname=7b462513bfc2,44605,1732188729784, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062., hostname=7b462513bfc2,44605,1732188729784, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062. 
is not online on 7b462513bfc2,44605,1732188729784 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T11:32:26,911 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062., hostname=7b462513bfc2,44605,1732188729784, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062. is not online on 7b462513bfc2,44605,1732188729784 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T11:32:26,912 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1732188730611.1c5b168b1cd5495e81cd9c96a67dc062., hostname=7b462513bfc2,44605,1732188729784, seqNum=2 from cache 2024-11-21T11:32:27,093 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:27,093 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:32:28,094 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:28,094 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:29,095 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:32:29,095 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:30,095 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:30,095 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:32:30,628 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T11:32:30,629 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:30,629 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:30,629 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:30,630 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:30,630 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:30,630 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:30,631 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:30,631 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:30,653 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:30,653 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:30,654 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:30,654 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:30,654 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:30,655 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:30,658 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:30,659 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:30,659 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:30,661 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T11:32:31,096 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:31,096 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:32,096 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:32,096 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:33,097 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:33,097 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:34,098 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:34,098 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:35,098 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:35,098 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:36,099 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:36,099 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:37,026 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae., hostname=7b462513bfc2,44605,1732188729784, seqNum=127] 2024-11-21T11:32:37,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] regionserver.HRegion(8855): Flush requested on e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:37,038 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e186dc33dd4cd397c5a22e8f3fdc3eae 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-21T11:32:37,043 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/a63c72de74814e0fa6bef0706357e2b3 is 1080, key is row0097/info:/1732188757027/Put/seqid=0 2024-11-21T11:32:37,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741856_1032 (size=12516) 2024-11-21T11:32:37,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741856_1032 (size=12516) 2024-11-21T11:32:37,049 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/a63c72de74814e0fa6bef0706357e2b3 2024-11-21T11:32:37,055 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/a63c72de74814e0fa6bef0706357e2b3 as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/a63c72de74814e0fa6bef0706357e2b3 2024-11-21T11:32:37,060 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/a63c72de74814e0fa6bef0706357e2b3, entries=7, sequenceid=137, filesize=12.2 K 2024-11-21T11:32:37,061 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=15.76 KB/16140 for e186dc33dd4cd397c5a22e8f3fdc3eae in 23ms, sequenceid=137, compaction requested=false 2024-11-21T11:32:37,061 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e186dc33dd4cd397c5a22e8f3fdc3eae: 
2024-11-21T11:32:37,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] regionserver.HRegion(8855): Flush requested on e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:37,062 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e186dc33dd4cd397c5a22e8f3fdc3eae 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-21T11:32:37,066 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/86f4f88f9f3f4b2db606dd6cb432d609 is 1080, key is row0104/info:/1732188757039/Put/seqid=0 2024-11-21T11:32:37,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741857_1033 (size=22238) 2024-11-21T11:32:37,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741857_1033 (size=22238) 2024-11-21T11:32:37,078 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/86f4f88f9f3f4b2db606dd6cb432d609 2024-11-21T11:32:37,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/86f4f88f9f3f4b2db606dd6cb432d609 as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/86f4f88f9f3f4b2db606dd6cb432d609 2024-11-21T11:32:37,087 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/86f4f88f9f3f4b2db606dd6cb432d609, entries=16, sequenceid=156, filesize=21.7 K 2024-11-21T11:32:37,088 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=9.46 KB/9684 for e186dc33dd4cd397c5a22e8f3fdc3eae in 26ms, sequenceid=156, compaction requested=true 2024-11-21T11:32:37,088 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e186dc33dd4cd397c5a22e8f3fdc3eae: 2024-11-21T11:32:37,088 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e186dc33dd4cd397c5a22e8f3fdc3eae:info, priority=-2147483648, current under compaction store size is 1 2024-11-21T11:32:37,088 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T11:32:37,088 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T11:32:37,089 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 77835 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-21T11:32:37,089 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1541): e186dc33dd4cd397c5a22e8f3fdc3eae/info is initiating minor compaction (all files) 2024-11-21T11:32:37,089 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e186dc33dd4cd397c5a22e8f3fdc3eae/info in TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae. 2024-11-21T11:32:37,090 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/397fc281473c475099657aad269e4639, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/a63c72de74814e0fa6bef0706357e2b3, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/86f4f88f9f3f4b2db606dd6cb432d609] into tmpdir=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp, totalSize=76.0 K 2024-11-21T11:32:37,090 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.Compactor(225): Compacting 397fc281473c475099657aad269e4639, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1732188742817 2024-11-21T11:32:37,090 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.Compactor(225): Compacting a63c72de74814e0fa6bef0706357e2b3, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732188757027 2024-11-21T11:32:37,090 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.Compactor(225): Compacting 86f4f88f9f3f4b2db606dd6cb432d609, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732188757039 2024-11-21T11:32:37,099 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:37,099 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:37,100 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e186dc33dd4cd397c5a22e8f3fdc3eae#info#compaction#71 average throughput is 59.52 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T11:32:37,101 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/3f7418b70d624e2db4b06869d29d108a is 1080, key is row0062/info:/1732188742817/Put/seqid=0 2024-11-21T11:32:37,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741858_1034 (size=68045) 2024-11-21T11:32:37,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741858_1034 (size=68045) 2024-11-21T11:32:37,111 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/3f7418b70d624e2db4b06869d29d108a as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/3f7418b70d624e2db4b06869d29d108a 2024-11-21T11:32:37,117 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e186dc33dd4cd397c5a22e8f3fdc3eae/info of e186dc33dd4cd397c5a22e8f3fdc3eae into 3f7418b70d624e2db4b06869d29d108a(size=66.5 K), total size for store is 66.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T11:32:37,117 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e186dc33dd4cd397c5a22e8f3fdc3eae: 2024-11-21T11:32:37,117 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae., storeName=e186dc33dd4cd397c5a22e8f3fdc3eae/info, priority=13, startTime=1732188757088; duration=0sec 2024-11-21T11:32:37,118 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T11:32:37,118 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e186dc33dd4cd397c5a22e8f3fdc3eae:info 2024-11-21T11:32:38,100 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:38,100 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:39,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] regionserver.HRegion(8855): Flush requested on e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:39,079 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e186dc33dd4cd397c5a22e8f3fdc3eae 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-21T11:32:39,084 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/484b1fd047144d65b806bc0a48cd75c6 is 1080, key is row0120/info:/1732188757063/Put/seqid=0 2024-11-21T11:32:39,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741859_1035 (size=15750) 2024-11-21T11:32:39,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741859_1035 (size=15750) 2024-11-21T11:32:39,089 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/484b1fd047144d65b806bc0a48cd75c6 2024-11-21T11:32:39,095 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/484b1fd047144d65b806bc0a48cd75c6 as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/484b1fd047144d65b806bc0a48cd75c6 2024-11-21T11:32:39,099 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/484b1fd047144d65b806bc0a48cd75c6, entries=10, sequenceid=170, filesize=15.4 K 2024-11-21T11:32:39,100 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=13.66 KB/13988 for e186dc33dd4cd397c5a22e8f3fdc3eae in 21ms, sequenceid=170, compaction requested=false 2024-11-21T11:32:39,100 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e186dc33dd4cd397c5a22e8f3fdc3eae: 2024-11-21T11:32:39,100 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:39,100 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:39,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] regionserver.HRegion(8855): Flush requested on e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:39,102 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e186dc33dd4cd397c5a22e8f3fdc3eae 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-21T11:32:39,106 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/1f3472331ae3428086283ea8ae3e0644 is 1080, key is row0130/info:/1732188759080/Put/seqid=0 2024-11-21T11:32:39,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741860_1036 (size=20078) 2024-11-21T11:32:39,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741860_1036 (size=20078) 2024-11-21T11:32:39,116 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=187 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/1f3472331ae3428086283ea8ae3e0644 2024-11-21T11:32:39,122 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/1f3472331ae3428086283ea8ae3e0644 as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/1f3472331ae3428086283ea8ae3e0644 2024-11-21T11:32:39,126 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/1f3472331ae3428086283ea8ae3e0644, entries=14, sequenceid=187, filesize=19.6 K 2024-11-21T11:32:39,127 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=14.71 KB/15064 for e186dc33dd4cd397c5a22e8f3fdc3eae in 25ms, sequenceid=187, compaction requested=true 2024-11-21T11:32:39,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e186dc33dd4cd397c5a22e8f3fdc3eae: 2024-11-21T11:32:39,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for 
store e186dc33dd4cd397c5a22e8f3fdc3eae:info, priority=-2147483648, current under compaction store size is 1 2024-11-21T11:32:39,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T11:32:39,128 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T11:32:39,129 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103873 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T11:32:39,129 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1541): e186dc33dd4cd397c5a22e8f3fdc3eae/info is initiating minor compaction (all files) 2024-11-21T11:32:39,129 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e186dc33dd4cd397c5a22e8f3fdc3eae/info in TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae. 2024-11-21T11:32:39,129 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/3f7418b70d624e2db4b06869d29d108a, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/484b1fd047144d65b806bc0a48cd75c6, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/1f3472331ae3428086283ea8ae3e0644] into tmpdir=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp, totalSize=101.4 K 2024-11-21T11:32:39,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] regionserver.HRegion(8855): Flush requested on e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:39,129 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e186dc33dd4cd397c5a22e8f3fdc3eae 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-21T11:32:39,129 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3f7418b70d624e2db4b06869d29d108a, keycount=58, bloomtype=ROW, size=66.5 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732188742817 2024-11-21T11:32:39,132 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.Compactor(225): Compacting 484b1fd047144d65b806bc0a48cd75c6, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732188757063 2024-11-21T11:32:39,132 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1f3472331ae3428086283ea8ae3e0644, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1732188759080 2024-11-21T11:32:39,134 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/e68cf272b06840868052f86e27034aa9 is 1080, key is row0144/info:/1732188759103/Put/seqid=0 2024-11-21T11:32:39,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741861_1037 (size=22238) 2024-11-21T11:32:39,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741861_1037 (size=22238) 2024-11-21T11:32:39,143 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/e68cf272b06840868052f86e27034aa9 2024-11-21T11:32:39,147 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e186dc33dd4cd397c5a22e8f3fdc3eae#info#compaction#75 average throughput is 42.07 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T11:32:39,148 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/f4fc445f981e469cb406a5f47b31a162 is 1080, key is row0062/info:/1732188742817/Put/seqid=0 2024-11-21T11:32:39,152 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/e68cf272b06840868052f86e27034aa9 as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/e68cf272b06840868052f86e27034aa9 2024-11-21T11:32:39,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741862_1038 (size=94096) 2024-11-21T11:32:39,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741862_1038 (size=94096) 2024-11-21T11:32:39,159 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/e68cf272b06840868052f86e27034aa9, entries=16, sequenceid=206, filesize=21.7 K 2024-11-21T11:32:39,160 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=1.05 KB/1076 for e186dc33dd4cd397c5a22e8f3fdc3eae in 31ms, sequenceid=206, compaction requested=false 2024-11-21T11:32:39,160 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e186dc33dd4cd397c5a22e8f3fdc3eae: 2024-11-21T11:32:39,162 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/f4fc445f981e469cb406a5f47b31a162 as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/f4fc445f981e469cb406a5f47b31a162 2024-11-21T11:32:39,167 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e186dc33dd4cd397c5a22e8f3fdc3eae/info of e186dc33dd4cd397c5a22e8f3fdc3eae into f4fc445f981e469cb406a5f47b31a162(size=91.9 K), total size for store is 113.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T11:32:39,167 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e186dc33dd4cd397c5a22e8f3fdc3eae: 2024-11-21T11:32:39,167 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae., storeName=e186dc33dd4cd397c5a22e8f3fdc3eae/info, priority=13, startTime=1732188759127; duration=0sec 2024-11-21T11:32:39,167 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T11:32:39,168 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e186dc33dd4cd397c5a22e8f3fdc3eae:info 2024-11-21T11:32:39,715 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T11:32:40,101 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:40,101 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:41,102 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:41,102 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:41,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] regionserver.HRegion(8855): Flush requested on e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:41,141 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e186dc33dd4cd397c5a22e8f3fdc3eae 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-21T11:32:41,145 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/df0f7011a44145bc983dbcb73d07fdfd is 1080, key is row0160/info:/1732188759131/Put/seqid=0 2024-11-21T11:32:41,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741863_1039 (size=12516) 2024-11-21T11:32:41,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741863_1039 (size=12516) 2024-11-21T11:32:41,151 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/df0f7011a44145bc983dbcb73d07fdfd 2024-11-21T11:32:41,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/df0f7011a44145bc983dbcb73d07fdfd as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/df0f7011a44145bc983dbcb73d07fdfd 2024-11-21T11:32:41,162 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/df0f7011a44145bc983dbcb73d07fdfd, entries=7, sequenceid=217, filesize=12.2 K 2024-11-21T11:32:41,163 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for e186dc33dd4cd397c5a22e8f3fdc3eae in 22ms, sequenceid=217, compaction requested=true 2024-11-21T11:32:41,163 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e186dc33dd4cd397c5a22e8f3fdc3eae: 2024-11-21T11:32:41,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e186dc33dd4cd397c5a22e8f3fdc3eae:info, priority=-2147483648, current under compaction store size is 1 2024-11-21T11:32:41,163 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T11:32:41,163 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T11:32:41,164 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 128850 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T11:32:41,164 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1541): e186dc33dd4cd397c5a22e8f3fdc3eae/info is initiating minor compaction (all files) 2024-11-21T11:32:41,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] regionserver.HRegion(8855): Flush requested on e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:41,164 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e186dc33dd4cd397c5a22e8f3fdc3eae/info in TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae. 2024-11-21T11:32:41,164 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/f4fc445f981e469cb406a5f47b31a162, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/e68cf272b06840868052f86e27034aa9, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/df0f7011a44145bc983dbcb73d07fdfd] into tmpdir=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp, totalSize=125.8 K 2024-11-21T11:32:41,164 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e186dc33dd4cd397c5a22e8f3fdc3eae 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-21T11:32:41,165 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.Compactor(225): Compacting f4fc445f981e469cb406a5f47b31a162, keycount=82, bloomtype=ROW, size=91.9 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1732188742817 2024-11-21T11:32:41,165 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.Compactor(225): Compacting e68cf272b06840868052f86e27034aa9, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732188759103 2024-11-21T11:32:41,165 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.Compactor(225): Compacting df0f7011a44145bc983dbcb73d07fdfd, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1732188759131 2024-11-21T11:32:41,185 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/04e83e43a91c4051bd438136a8ee25ff is 1080, key is row0167/info:/1732188761142/Put/seqid=0 2024-11-21T11:32:41,193 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741864_1040 (size=21156) 2024-11-21T11:32:41,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741864_1040 (size=21156) 2024-11-21T11:32:41,198 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e186dc33dd4cd397c5a22e8f3fdc3eae#info#compaction#78 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T11:32:41,199 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/9a21a716686f445eb64f50df1731d2af is 1080, key is row0062/info:/1732188742817/Put/seqid=0 2024-11-21T11:32:41,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741865_1041 (size=118996) 2024-11-21T11:32:41,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741865_1041 (size=118996) 2024-11-21T11:32:41,210 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/9a21a716686f445eb64f50df1731d2af as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/9a21a716686f445eb64f50df1731d2af 2024-11-21T11:32:41,216 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e186dc33dd4cd397c5a22e8f3fdc3eae/info of e186dc33dd4cd397c5a22e8f3fdc3eae into 9a21a716686f445eb64f50df1731d2af(size=116.2 K), total size for store is 116.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-21T11:32:41,216 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e186dc33dd4cd397c5a22e8f3fdc3eae: 2024-11-21T11:32:41,216 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae., storeName=e186dc33dd4cd397c5a22e8f3fdc3eae/info, priority=13, startTime=1732188761163; duration=0sec 2024-11-21T11:32:41,216 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T11:32:41,216 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e186dc33dd4cd397c5a22e8f3fdc3eae:info 2024-11-21T11:32:41,593 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/04e83e43a91c4051bd438136a8ee25ff 2024-11-21T11:32:41,598 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/04e83e43a91c4051bd438136a8ee25ff as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/04e83e43a91c4051bd438136a8ee25ff 2024-11-21T11:32:41,603 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/04e83e43a91c4051bd438136a8ee25ff, entries=15, sequenceid=235, filesize=20.7 K 2024-11-21T11:32:41,604 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=11.56 KB/11836 for e186dc33dd4cd397c5a22e8f3fdc3eae in 440ms, sequenceid=235, compaction requested=false 2024-11-21T11:32:41,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e186dc33dd4cd397c5a22e8f3fdc3eae: 2024-11-21T11:32:42,102 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:42,102 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:43,103 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:43,103 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:43,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] regionserver.HRegion(8855): Flush requested on e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:43,205 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e186dc33dd4cd397c5a22e8f3fdc3eae 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-21T11:32:43,210 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/a2a8f99646924fe8bbcc9deb3d520d22 is 1080, key is row0182/info:/1732188761165/Put/seqid=0 2024-11-21T11:32:43,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741866_1042 (size=17906) 2024-11-21T11:32:43,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741866_1042 (size=17906) 2024-11-21T11:32:43,216 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/a2a8f99646924fe8bbcc9deb3d520d22 2024-11-21T11:32:43,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/a2a8f99646924fe8bbcc9deb3d520d22 as 
hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/a2a8f99646924fe8bbcc9deb3d520d22 2024-11-21T11:32:43,227 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/a2a8f99646924fe8bbcc9deb3d520d22, entries=12, sequenceid=251, filesize=17.5 K 2024-11-21T11:32:43,228 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=12.61 KB/12912 for e186dc33dd4cd397c5a22e8f3fdc3eae in 23ms, sequenceid=251, compaction requested=true 2024-11-21T11:32:43,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e186dc33dd4cd397c5a22e8f3fdc3eae: 2024-11-21T11:32:43,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e186dc33dd4cd397c5a22e8f3fdc3eae:info, priority=-2147483648, current under compaction store size is 1 2024-11-21T11:32:43,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T11:32:43,228 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T11:32:43,229 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 158058 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T11:32:43,230 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1541): e186dc33dd4cd397c5a22e8f3fdc3eae/info is initiating minor compaction (all files) 2024-11-21T11:32:43,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] regionserver.HRegion(8855): Flush requested on e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:43,230 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e186dc33dd4cd397c5a22e8f3fdc3eae/info in TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae. 
2024-11-21T11:32:43,230 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/9a21a716686f445eb64f50df1731d2af, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/04e83e43a91c4051bd438136a8ee25ff, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/a2a8f99646924fe8bbcc9deb3d520d22] into tmpdir=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp, totalSize=154.4 K 2024-11-21T11:32:43,230 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e186dc33dd4cd397c5a22e8f3fdc3eae 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-21T11:32:43,230 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9a21a716686f445eb64f50df1731d2af, keycount=105, bloomtype=ROW, size=116.2 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1732188742817 2024-11-21T11:32:43,231 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.Compactor(225): Compacting 04e83e43a91c4051bd438136a8ee25ff, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732188761142 2024-11-21T11:32:43,231 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.Compactor(225): Compacting a2a8f99646924fe8bbcc9deb3d520d22, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732188761165 2024-11-21T11:32:43,234 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/ba0f1043b074425190e962d42322491b is 1080, key is row0194/info:/1732188763207/Put/seqid=0 2024-11-21T11:32:43,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741867_1043 (size=20089) 2024-11-21T11:32:43,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741867_1043 (size=20089) 2024-11-21T11:32:43,241 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=268 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/ba0f1043b074425190e962d42322491b 2024-11-21T11:32:43,244 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e186dc33dd4cd397c5a22e8f3fdc3eae#info#compaction#81 average throughput is 45.15 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T11:32:43,244 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/563e6b68052145d2aa8581976f876308 is 1080, key is row0062/info:/1732188742817/Put/seqid=0 2024-11-21T11:32:43,251 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/ba0f1043b074425190e962d42322491b as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/ba0f1043b074425190e962d42322491b 2024-11-21T11:32:43,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741868_1044 (size=148409) 2024-11-21T11:32:43,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741868_1044 (size=148409) 2024-11-21T11:32:43,257 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/ba0f1043b074425190e962d42322491b, entries=14, sequenceid=268, filesize=19.6 K 2024-11-21T11:32:43,258 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=13.66 KB/13988 for e186dc33dd4cd397c5a22e8f3fdc3eae in 28ms, sequenceid=268, compaction requested=false 2024-11-21T11:32:43,258 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/563e6b68052145d2aa8581976f876308 as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/563e6b68052145d2aa8581976f876308 2024-11-21T11:32:43,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e186dc33dd4cd397c5a22e8f3fdc3eae: 2024-11-21T11:32:43,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] regionserver.HRegion(8855): Flush requested on e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:43,259 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e186dc33dd4cd397c5a22e8f3fdc3eae 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-21T11:32:43,263 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/a6e867f24eab407097a8fc7f7f53d40d is 1080, key is row0208/info:/1732188763231/Put/seqid=0 2024-11-21T11:32:43,265 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e186dc33dd4cd397c5a22e8f3fdc3eae/info of e186dc33dd4cd397c5a22e8f3fdc3eae into 
563e6b68052145d2aa8581976f876308(size=144.9 K), total size for store is 164.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T11:32:43,265 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e186dc33dd4cd397c5a22e8f3fdc3eae: 2024-11-21T11:32:43,265 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae., storeName=e186dc33dd4cd397c5a22e8f3fdc3eae/info, priority=13, startTime=1732188763228; duration=0sec 2024-11-21T11:32:43,265 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T11:32:43,265 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e186dc33dd4cd397c5a22e8f3fdc3eae:info 2024-11-21T11:32:43,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741869_1045 (size=20092) 2024-11-21T11:32:43,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741869_1045 (size=20092) 2024-11-21T11:32:43,268 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/a6e867f24eab407097a8fc7f7f53d40d 2024-11-21T11:32:43,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/a6e867f24eab407097a8fc7f7f53d40d as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/a6e867f24eab407097a8fc7f7f53d40d 2024-11-21T11:32:43,277 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/a6e867f24eab407097a8fc7f7f53d40d, entries=14, sequenceid=285, filesize=19.6 K 2024-11-21T11:32:43,278 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=3.15 KB/3228 for e186dc33dd4cd397c5a22e8f3fdc3eae in 19ms, sequenceid=285, compaction requested=true 2024-11-21T11:32:43,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e186dc33dd4cd397c5a22e8f3fdc3eae: 2024-11-21T11:32:43,278 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e186dc33dd4cd397c5a22e8f3fdc3eae:info, priority=-2147483648, current under compaction store size is 1 2024-11-21T11:32:43,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T11:32:43,279 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T11:32:43,279 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 188590 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T11:32:43,280 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1541): e186dc33dd4cd397c5a22e8f3fdc3eae/info is initiating minor compaction (all files) 2024-11-21T11:32:43,280 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e186dc33dd4cd397c5a22e8f3fdc3eae/info in TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae. 2024-11-21T11:32:43,280 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/563e6b68052145d2aa8581976f876308, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/ba0f1043b074425190e962d42322491b, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/a6e867f24eab407097a8fc7f7f53d40d] into tmpdir=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp, totalSize=184.2 K 2024-11-21T11:32:43,280 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.Compactor(225): Compacting 563e6b68052145d2aa8581976f876308, keycount=132, bloomtype=ROW, size=144.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732188742817 2024-11-21T11:32:43,281 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.Compactor(225): Compacting ba0f1043b074425190e962d42322491b, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1732188763207 2024-11-21T11:32:43,281 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.Compactor(225): Compacting a6e867f24eab407097a8fc7f7f53d40d, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732188763231 2024-11-21T11:32:43,291 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e186dc33dd4cd397c5a22e8f3fdc3eae#info#compaction#83 average throughput is 82.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T11:32:43,292 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/ae3b8092bbba4b1985c970f705615601 is 1080, key is row0062/info:/1732188742817/Put/seqid=0 2024-11-21T11:32:43,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741870_1046 (size=178728) 2024-11-21T11:32:43,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741870_1046 (size=178728) 2024-11-21T11:32:43,301 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/ae3b8092bbba4b1985c970f705615601 as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/ae3b8092bbba4b1985c970f705615601 2024-11-21T11:32:43,306 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e186dc33dd4cd397c5a22e8f3fdc3eae/info of e186dc33dd4cd397c5a22e8f3fdc3eae into ae3b8092bbba4b1985c970f705615601(size=174.5 K), total size for store is 174.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T11:32:43,306 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e186dc33dd4cd397c5a22e8f3fdc3eae: 2024-11-21T11:32:43,306 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae., storeName=e186dc33dd4cd397c5a22e8f3fdc3eae/info, priority=13, startTime=1732188763278; duration=0sec 2024-11-21T11:32:43,306 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T11:32:43,306 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e186dc33dd4cd397c5a22e8f3fdc3eae:info 2024-11-21T11:32:44,103 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:44,103 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:45,104 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:45,104 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:45,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] regionserver.HRegion(8855): Flush requested on e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:45,271 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e186dc33dd4cd397c5a22e8f3fdc3eae 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-21T11:32:45,276 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/a6cb3c3a209744398d1c7e183e9b67a5 is 1080, key is row0222/info:/1732188763260/Put/seqid=0 2024-11-21T11:32:45,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741871_1047 (size=12523) 2024-11-21T11:32:45,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741871_1047 (size=12523) 2024-11-21T11:32:45,284 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/a6cb3c3a209744398d1c7e183e9b67a5 2024-11-21T11:32:45,290 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/a6cb3c3a209744398d1c7e183e9b67a5 as 
hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/a6cb3c3a209744398d1c7e183e9b67a5 2024-11-21T11:32:45,294 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/a6cb3c3a209744398d1c7e183e9b67a5, entries=7, sequenceid=297, filesize=12.2 K 2024-11-21T11:32:45,295 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=16.81 KB/17216 for e186dc33dd4cd397c5a22e8f3fdc3eae in 24ms, sequenceid=297, compaction requested=false 2024-11-21T11:32:45,295 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e186dc33dd4cd397c5a22e8f3fdc3eae: 2024-11-21T11:32:45,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44605 {}] regionserver.HRegion(8855): Flush requested on e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:45,296 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e186dc33dd4cd397c5a22e8f3fdc3eae 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-21T11:32:45,299 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/2e5092fe674943fea3b4d911c6078192 is 1080, key is row0229/info:/1732188765272/Put/seqid=0 2024-11-21T11:32:45,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741872_1048 (size=23333) 2024-11-21T11:32:45,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741872_1048 (size=23333) 2024-11-21T11:32:45,306 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/2e5092fe674943fea3b4d911c6078192 2024-11-21T11:32:45,311 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/2e5092fe674943fea3b4d911c6078192 as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/2e5092fe674943fea3b4d911c6078192 2024-11-21T11:32:45,315 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/2e5092fe674943fea3b4d911c6078192, entries=17, sequenceid=317, filesize=22.8 K 2024-11-21T11:32:45,316 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=11.56 KB/11836 for e186dc33dd4cd397c5a22e8f3fdc3eae in 21ms, sequenceid=317, compaction requested=true 2024-11-21T11:32:45,316 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for e186dc33dd4cd397c5a22e8f3fdc3eae: 2024-11-21T11:32:45,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e186dc33dd4cd397c5a22e8f3fdc3eae:info, priority=-2147483648, current under compaction store size is 1 2024-11-21T11:32:45,317 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T11:32:45,317 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-21T11:32:45,317 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 214584 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-21T11:32:45,318 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1541): e186dc33dd4cd397c5a22e8f3fdc3eae/info is initiating minor compaction (all files) 2024-11-21T11:32:45,318 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e186dc33dd4cd397c5a22e8f3fdc3eae/info in TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae. 2024-11-21T11:32:45,318 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/ae3b8092bbba4b1985c970f705615601, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/a6cb3c3a209744398d1c7e183e9b67a5, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/2e5092fe674943fea3b4d911c6078192] into tmpdir=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp, totalSize=209.6 K 2024-11-21T11:32:45,318 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.Compactor(225): Compacting ae3b8092bbba4b1985c970f705615601, keycount=160, bloomtype=ROW, size=174.5 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732188742817 2024-11-21T11:32:45,318 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.Compactor(225): Compacting a6cb3c3a209744398d1c7e183e9b67a5, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1732188763260 2024-11-21T11:32:45,319 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2e5092fe674943fea3b4d911c6078192, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732188765272 2024-11-21T11:32:45,329 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e186dc33dd4cd397c5a22e8f3fdc3eae#info#compaction#86 average throughput is 62.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-21T11:32:45,330 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/6959e451d43b4aa681989f437814760f is 1080, key is row0062/info:/1732188742817/Put/seqid=0 2024-11-21T11:32:45,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741873_1049 (size=204803) 2024-11-21T11:32:45,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741873_1049 (size=204803) 2024-11-21T11:32:45,337 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/6959e451d43b4aa681989f437814760f as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/6959e451d43b4aa681989f437814760f 2024-11-21T11:32:45,342 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e186dc33dd4cd397c5a22e8f3fdc3eae/info of e186dc33dd4cd397c5a22e8f3fdc3eae into 6959e451d43b4aa681989f437814760f(size=200.0 K), total size for store is 200.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-21T11:32:45,342 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e186dc33dd4cd397c5a22e8f3fdc3eae: 2024-11-21T11:32:45,342 INFO [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae., storeName=e186dc33dd4cd397c5a22e8f3fdc3eae/info, priority=13, startTime=1732188765316; duration=0sec 2024-11-21T11:32:45,342 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-21T11:32:45,342 DEBUG [RS:0;7b462513bfc2:44605-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e186dc33dd4cd397c5a22e8f3fdc3eae:info 2024-11-21T11:32:46,105 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:46,105 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:47,105 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:47,105 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:47,314 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-21T11:32:47,315 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C44605%2C1732188729784.1732188767315 2024-11-21T11:32:47,320 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:47,320 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:47,320 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:47,320 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:47,320 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:47,320 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/WALs/7b462513bfc2,44605,1732188729784/7b462513bfc2%2C44605%2C1732188729784.1732188730169 with entries=310, filesize=307.89 KB; new WAL /user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/WALs/7b462513bfc2,44605,1732188729784/7b462513bfc2%2C44605%2C1732188729784.1732188767315 2024-11-21T11:32:47,321 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41719:41719),(127.0.0.1/127.0.0.1:45177:45177)] 2024-11-21T11:32:47,321 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/WALs/7b462513bfc2,44605,1732188729784/7b462513bfc2%2C44605%2C1732188729784.1732188730169 is not closed yet, will try archiving it next time 2024-11-21T11:32:47,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741833_1009 (size=315283) 2024-11-21T11:32:47,322 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741833_1009 (size=315283) 2024-11-21T11:32:47,324 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 077e0264a169ebfd5457c142719e92ea: 2024-11-21T11:32:47,325 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing e186dc33dd4cd397c5a22e8f3fdc3eae 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-21T11:32:47,328 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/2605a55b8cb64cdba6cce8187b93fcaa is 1080, key is row0246/info:/1732188765296/Put/seqid=0 2024-11-21T11:32:47,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741875_1051 (size=16839) 2024-11-21T11:32:47,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741875_1051 (size=16839) 2024-11-21T11:32:47,333 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/2605a55b8cb64cdba6cce8187b93fcaa 2024-11-21T11:32:47,338 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/.tmp/info/2605a55b8cb64cdba6cce8187b93fcaa as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/2605a55b8cb64cdba6cce8187b93fcaa 2024-11-21T11:32:47,343 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/2605a55b8cb64cdba6cce8187b93fcaa, entries=11, sequenceid=332, filesize=16.4 K 2024-11-21T11:32:47,344 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=0 B/0 for e186dc33dd4cd397c5a22e8f3fdc3eae in 20ms, sequenceid=332, compaction requested=false 2024-11-21T11:32:47,344 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for e186dc33dd4cd397c5a22e8f3fdc3eae: 2024-11-21T11:32:47,344 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-21T11:32:47,347 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/1588230740/.tmp/info/f43c1f06d7cb4b8697c77c55b659be51 is 193, key is TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae./info:regioninfo/1732188745632/Put/seqid=0 2024-11-21T11:32:47,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741876_1052 (size=6223) 2024-11-21T11:32:47,354 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741876_1052 (size=6223) 2024-11-21T11:32:47,354 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/1588230740/.tmp/info/f43c1f06d7cb4b8697c77c55b659be51 2024-11-21T11:32:47,359 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/1588230740/.tmp/info/f43c1f06d7cb4b8697c77c55b659be51 as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/1588230740/info/f43c1f06d7cb4b8697c77c55b659be51 2024-11-21T11:32:47,364 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/1588230740/info/f43c1f06d7cb4b8697c77c55b659be51, entries=5, sequenceid=21, filesize=6.1 K 2024-11-21T11:32:47,365 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 21ms, sequenceid=21, compaction requested=false 2024-11-21T11:32:47,365 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-21T11:32:47,365 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C44605%2C1732188729784.1732188767365 2024-11-21T11:32:47,369 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:47,370 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:47,370 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:47,370 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:47,370 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:47,370 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/WALs/7b462513bfc2,44605,1732188729784/7b462513bfc2%2C44605%2C1732188729784.1732188767315 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/WALs/7b462513bfc2,44605,1732188729784/7b462513bfc2%2C44605%2C1732188729784.1732188767365 2024-11-21T11:32:47,371 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41719:41719),(127.0.0.1/127.0.0.1:45177:45177)] 2024-11-21T11:32:47,371 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/WALs/7b462513bfc2,44605,1732188729784/7b462513bfc2%2C44605%2C1732188729784.1732188767315 is not closed yet, will try archiving it next time 2024-11-21T11:32:47,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741874_1050 (size=731) 2024-11-21T11:32:47,371 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/WALs/7b462513bfc2,44605,1732188729784/7b462513bfc2%2C44605%2C1732188729784.1732188730169 to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/oldWALs/7b462513bfc2%2C44605%2C1732188729784.1732188730169 
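[Editor's note] The flush and compaction entries above repeatedly show the same two-step commit: the new HFile is written under the region's .tmp directory and then "Committing ... as ..." moves it into the column-family directory. The following is a minimal, self-contained sketch of that write-then-rename pattern using the plain Hadoop FileSystem API; the paths and class name are made up for illustration and this is not the actual HBase HRegionFileSystem code.

// Sketch of the "write under .tmp, then commit by rename" pattern, assuming hypothetical paths.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path tmpFile = new Path("/data/default/ExampleTable/region/.tmp/examplefile");
    Path finalFile = new Path("/data/default/ExampleTable/region/info/examplefile");

    // 1. Write the new store file under the region's .tmp directory first, so readers
    //    never observe a partially written file in the live column-family directory.
    try (FSDataOutputStream out = fs.create(tmpFile)) {
      out.writeBytes("flushed cells would go here");
    }

    // 2. Commit by renaming into the column-family directory; the rename is atomic on
    //    HDFS, which is what makes the two-step flush/compaction commit safe.
    if (!fs.rename(tmpFile, finalFile)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + finalFile);
    }
  }
}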
2024-11-21T11:32:47,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741874_1050 (size=731) 2024-11-21T11:32:47,372 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T11:32:47,372 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/WALs/7b462513bfc2,44605,1732188729784/7b462513bfc2%2C44605%2C1732188729784.1732188767315 to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/oldWALs/7b462513bfc2%2C44605%2C1732188729784.1732188767315 2024-11-21T11:32:47,472 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T11:32:47,472 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T11:32:47,472 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at 
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T11:32:47,472 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:32:47,472 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:32:47,472 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T11:32:47,472 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T11:32:47,472 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=658906858, stopped=false 2024-11-21T11:32:47,473 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7b462513bfc2,44595,1732188729732 2024-11-21T11:32:47,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x1013a4c5c050001, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-21T11:32:47,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-21T11:32:47,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:47,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x1013a4c5c050001, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:47,474 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T11:32:47,474 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T11:32:47,475 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T11:32:47,475 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:32:47,475 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:32:47,475 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7b462513bfc2,44605,1732188729784' ***** 2024-11-21T11:32:47,475 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T11:32:47,475 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44605-0x1013a4c5c050001, quorum=127.0.0.1:49560, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:32:47,476 INFO [RS:0;7b462513bfc2:44605 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T11:32:47,476 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T11:32:47,476 INFO [RS:0;7b462513bfc2:44605 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T11:32:47,476 INFO [RS:0;7b462513bfc2:44605 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T11:32:47,476 INFO [RS:0;7b462513bfc2:44605 {}] regionserver.HRegionServer(3091): Received CLOSE for 077e0264a169ebfd5457c142719e92ea 2024-11-21T11:32:47,476 INFO [RS:0;7b462513bfc2:44605 {}] regionserver.HRegionServer(3091): Received CLOSE for e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:47,476 INFO [RS:0;7b462513bfc2:44605 {}] regionserver.HRegionServer(959): stopping server 7b462513bfc2,44605,1732188729784 2024-11-21T11:32:47,476 INFO [RS:0;7b462513bfc2:44605 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T11:32:47,476 INFO [RS:0;7b462513bfc2:44605 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7b462513bfc2:44605. 
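[Editor's note] The ZKWatcher/ZKUtil entries above show cluster shutdown being signaled by deleting the /hbase/running znode, with each process keeping a watch on that path even while the znode is absent ("Set watcher on znode that does not yet exist"). Below is a minimal sketch of that pattern using the plain ZooKeeper client; the connection string is a placeholder and this is not HBase's ZKWatcher implementation.

// Sketch: watch a "running" znode whose deletion is the shutdown signal.
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch shutdownSignal = new CountDownLatch(1);

    Watcher watcher = (WatchedEvent event) -> {
      // A NodeDeleted event on the watched znode means the cluster was asked to shut down.
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/running".equals(event.getPath())) {
        shutdownSignal.countDown();
      }
    };

    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, watcher);

    // exists() registers the watch even when the znode is not there yet, mirroring the
    // "Set watcher on znode that does not yet exist, /hbase/running" lines above.
    zk.exists("/hbase/running", watcher);

    shutdownSignal.await();  // block until the znode is deleted
    zk.close();
  }
}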
2024-11-21T11:32:47,476 DEBUG [RS:0;7b462513bfc2:44605 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T11:32:47,476 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 077e0264a169ebfd5457c142719e92ea, disabling compactions & flushes 2024-11-21T11:32:47,476 DEBUG [RS:0;7b462513bfc2:44605 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:32:47,476 INFO [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732188744906.077e0264a169ebfd5457c142719e92ea. 2024-11-21T11:32:47,476 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732188744906.077e0264a169ebfd5457c142719e92ea. 2024-11-21T11:32:47,476 INFO [RS:0;7b462513bfc2:44605 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T11:32:47,476 INFO [RS:0;7b462513bfc2:44605 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T11:32:47,476 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732188744906.077e0264a169ebfd5457c142719e92ea. after waiting 0 ms 2024-11-21T11:32:47,476 INFO [RS:0;7b462513bfc2:44605 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-21T11:32:47,476 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732188744906.077e0264a169ebfd5457c142719e92ea. 
2024-11-21T11:32:47,476 INFO [RS:0;7b462513bfc2:44605 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T11:32:47,477 INFO [RS:0;7b462513bfc2:44605 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-21T11:32:47,477 DEBUG [RS:0;7b462513bfc2:44605 {}] regionserver.HRegionServer(1325): Online Regions={077e0264a169ebfd5457c142719e92ea=TestLogRolling-testLogRolling,,1732188744906.077e0264a169ebfd5457c142719e92ea., e186dc33dd4cd397c5a22e8f3fdc3eae=TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae., 1588230740=hbase:meta,,1.1588230740} 2024-11-21T11:32:47,477 DEBUG [RS:0;7b462513bfc2:44605 {}] regionserver.HRegionServer(1351): Waiting on 077e0264a169ebfd5457c142719e92ea, 1588230740, e186dc33dd4cd397c5a22e8f3fdc3eae 2024-11-21T11:32:47,477 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T11:32:47,477 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T11:32:47,477 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T11:32:47,477 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T11:32:47,477 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T11:32:47,477 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732188744906.077e0264a169ebfd5457c142719e92ea.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/077e0264a169ebfd5457c142719e92ea/info/5ce7938b7fc548e0abbf3d19a9298299.1c5b168b1cd5495e81cd9c96a67dc062->hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/5ce7938b7fc548e0abbf3d19a9298299-bottom] to archive 2024-11-21T11:32:47,478 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732188744906.077e0264a169ebfd5457c142719e92ea.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-21T11:32:47,480 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732188744906.077e0264a169ebfd5457c142719e92ea.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/077e0264a169ebfd5457c142719e92ea/info/5ce7938b7fc548e0abbf3d19a9298299.1c5b168b1cd5495e81cd9c96a67dc062 to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/077e0264a169ebfd5457c142719e92ea/info/5ce7938b7fc548e0abbf3d19a9298299.1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:47,480 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732188744906.077e0264a169ebfd5457c142719e92ea.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=7b462513bfc2:44595 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-21T11:32:47,480 WARN [StoreCloser-TestLogRolling-testLogRolling,,1732188744906.077e0264a169ebfd5457c142719e92ea.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-21T11:32:47,481 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-21T11:32:47,482 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T11:32:47,482 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T11:32:47,482 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732188767477Running coprocessor pre-close hooks at 1732188767477Disabling compacts and flushes for region at 1732188767477Disabling writes for close at 1732188767477Writing region close event to WAL at 1732188767478 (+1 ms)Running coprocessor post-close hooks at 1732188767482 (+4 ms)Closed at 1732188767482 2024-11-21T11:32:47,482 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T11:32:47,483 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/077e0264a169ebfd5457c142719e92ea/recovered.edits/131.seqid, newMaxSeqId=131, maxSeqId=126 2024-11-21T11:32:47,484 INFO [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732188744906.077e0264a169ebfd5457c142719e92ea. 2024-11-21T11:32:47,484 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 077e0264a169ebfd5457c142719e92ea: Waiting for close lock at 1732188767476Running coprocessor pre-close hooks at 1732188767476Disabling compacts and flushes for region at 1732188767476Disabling writes for close at 1732188767476Writing region close event to WAL at 1732188767481 (+5 ms)Running coprocessor post-close hooks at 1732188767484 (+3 ms)Closed at 1732188767484 2024-11-21T11:32:47,484 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1732188744906.077e0264a169ebfd5457c142719e92ea. 
2024-11-21T11:32:47,484 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e186dc33dd4cd397c5a22e8f3fdc3eae, disabling compactions & flushes 2024-11-21T11:32:47,484 INFO [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae. 2024-11-21T11:32:47,484 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae. 2024-11-21T11:32:47,484 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae. after waiting 0 ms 2024-11-21T11:32:47,484 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae. 2024-11-21T11:32:47,484 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/5ce7938b7fc548e0abbf3d19a9298299.1c5b168b1cd5495e81cd9c96a67dc062->hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/1c5b168b1cd5495e81cd9c96a67dc062/info/5ce7938b7fc548e0abbf3d19a9298299-top, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/TestLogRolling-testLogRolling=1c5b168b1cd5495e81cd9c96a67dc062-1bbcdf02c19945aa9b0e2e1cf109e951, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/TestLogRolling-testLogRolling=1c5b168b1cd5495e81cd9c96a67dc062-4fd2a2eda1e046b6aab020f5b21a170f, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/397fc281473c475099657aad269e4639, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/TestLogRolling-testLogRolling=1c5b168b1cd5495e81cd9c96a67dc062-d8ff1c4c7f1b46d0861ade7da6b1e4f7, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/a63c72de74814e0fa6bef0706357e2b3, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/3f7418b70d624e2db4b06869d29d108a, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/86f4f88f9f3f4b2db606dd6cb432d609, 
hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/484b1fd047144d65b806bc0a48cd75c6, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/f4fc445f981e469cb406a5f47b31a162, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/1f3472331ae3428086283ea8ae3e0644, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/e68cf272b06840868052f86e27034aa9, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/9a21a716686f445eb64f50df1731d2af, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/df0f7011a44145bc983dbcb73d07fdfd, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/04e83e43a91c4051bd438136a8ee25ff, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/563e6b68052145d2aa8581976f876308, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/a2a8f99646924fe8bbcc9deb3d520d22, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/ba0f1043b074425190e962d42322491b, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/ae3b8092bbba4b1985c970f705615601, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/a6e867f24eab407097a8fc7f7f53d40d, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/a6cb3c3a209744398d1c7e183e9b67a5, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/2e5092fe674943fea3b4d911c6078192] to archive 2024-11-21T11:32:47,485 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-21T11:32:47,486 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/5ce7938b7fc548e0abbf3d19a9298299.1c5b168b1cd5495e81cd9c96a67dc062 to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/5ce7938b7fc548e0abbf3d19a9298299.1c5b168b1cd5495e81cd9c96a67dc062 2024-11-21T11:32:47,487 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/TestLogRolling-testLogRolling=1c5b168b1cd5495e81cd9c96a67dc062-1bbcdf02c19945aa9b0e2e1cf109e951 to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/TestLogRolling-testLogRolling=1c5b168b1cd5495e81cd9c96a67dc062-1bbcdf02c19945aa9b0e2e1cf109e951 2024-11-21T11:32:47,488 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/TestLogRolling-testLogRolling=1c5b168b1cd5495e81cd9c96a67dc062-4fd2a2eda1e046b6aab020f5b21a170f to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/TestLogRolling-testLogRolling=1c5b168b1cd5495e81cd9c96a67dc062-4fd2a2eda1e046b6aab020f5b21a170f 2024-11-21T11:32:47,489 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/397fc281473c475099657aad269e4639 to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/397fc281473c475099657aad269e4639 2024-11-21T11:32:47,490 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/TestLogRolling-testLogRolling=1c5b168b1cd5495e81cd9c96a67dc062-d8ff1c4c7f1b46d0861ade7da6b1e4f7 to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/TestLogRolling-testLogRolling=1c5b168b1cd5495e81cd9c96a67dc062-d8ff1c4c7f1b46d0861ade7da6b1e4f7 2024-11-21T11:32:47,491 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/a63c72de74814e0fa6bef0706357e2b3 to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/a63c72de74814e0fa6bef0706357e2b3 2024-11-21T11:32:47,492 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/3f7418b70d624e2db4b06869d29d108a to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/3f7418b70d624e2db4b06869d29d108a 2024-11-21T11:32:47,493 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/86f4f88f9f3f4b2db606dd6cb432d609 to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/86f4f88f9f3f4b2db606dd6cb432d609 2024-11-21T11:32:47,495 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/484b1fd047144d65b806bc0a48cd75c6 to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/484b1fd047144d65b806bc0a48cd75c6 2024-11-21T11:32:47,496 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/f4fc445f981e469cb406a5f47b31a162 to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/f4fc445f981e469cb406a5f47b31a162 2024-11-21T11:32:47,497 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/1f3472331ae3428086283ea8ae3e0644 to 
hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/1f3472331ae3428086283ea8ae3e0644 2024-11-21T11:32:47,498 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/e68cf272b06840868052f86e27034aa9 to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/e68cf272b06840868052f86e27034aa9 2024-11-21T11:32:47,499 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/9a21a716686f445eb64f50df1731d2af to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/9a21a716686f445eb64f50df1731d2af 2024-11-21T11:32:47,500 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/df0f7011a44145bc983dbcb73d07fdfd to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/df0f7011a44145bc983dbcb73d07fdfd 2024-11-21T11:32:47,501 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/04e83e43a91c4051bd438136a8ee25ff to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/04e83e43a91c4051bd438136a8ee25ff 2024-11-21T11:32:47,502 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/563e6b68052145d2aa8581976f876308 to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/563e6b68052145d2aa8581976f876308 2024-11-21T11:32:47,503 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/a2a8f99646924fe8bbcc9deb3d520d22 to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/a2a8f99646924fe8bbcc9deb3d520d22 2024-11-21T11:32:47,504 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/ba0f1043b074425190e962d42322491b to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/ba0f1043b074425190e962d42322491b 2024-11-21T11:32:47,505 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/ae3b8092bbba4b1985c970f705615601 to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/ae3b8092bbba4b1985c970f705615601 2024-11-21T11:32:47,506 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/a6e867f24eab407097a8fc7f7f53d40d to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/a6e867f24eab407097a8fc7f7f53d40d 2024-11-21T11:32:47,507 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/a6cb3c3a209744398d1c7e183e9b67a5 to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/a6cb3c3a209744398d1c7e183e9b67a5 2024-11-21T11:32:47,508 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/2e5092fe674943fea3b4d911c6078192 to hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/archive/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/info/2e5092fe674943fea3b4d911c6078192 2024-11-21T11:32:47,508 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae.-1 {}] 
regionserver.HStore(2414): Failed to report archival of files: [397fc281473c475099657aad269e4639=43081, a63c72de74814e0fa6bef0706357e2b3=12516, 3f7418b70d624e2db4b06869d29d108a=68045, 86f4f88f9f3f4b2db606dd6cb432d609=22238, 484b1fd047144d65b806bc0a48cd75c6=15750, f4fc445f981e469cb406a5f47b31a162=94096, 1f3472331ae3428086283ea8ae3e0644=20078, e68cf272b06840868052f86e27034aa9=22238, 9a21a716686f445eb64f50df1731d2af=118996, df0f7011a44145bc983dbcb73d07fdfd=12516, 04e83e43a91c4051bd438136a8ee25ff=21156, 563e6b68052145d2aa8581976f876308=148409, a2a8f99646924fe8bbcc9deb3d520d22=17906, ba0f1043b074425190e962d42322491b=20089, ae3b8092bbba4b1985c970f705615601=178728, a6e867f24eab407097a8fc7f7f53d40d=20092, a6cb3c3a209744398d1c7e183e9b67a5=12523, 2e5092fe674943fea3b4d911c6078192=23333] 2024-11-21T11:32:47,511 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/data/default/TestLogRolling-testLogRolling/e186dc33dd4cd397c5a22e8f3fdc3eae/recovered.edits/335.seqid, newMaxSeqId=335, maxSeqId=126 2024-11-21T11:32:47,512 INFO [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae. 2024-11-21T11:32:47,512 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e186dc33dd4cd397c5a22e8f3fdc3eae: Waiting for close lock at 1732188767484Running coprocessor pre-close hooks at 1732188767484Disabling compacts and flushes for region at 1732188767484Disabling writes for close at 1732188767484Writing region close event to WAL at 1732188767508 (+24 ms)Running coprocessor post-close hooks at 1732188767512 (+4 ms)Closed at 1732188767512 2024-11-21T11:32:47,512 DEBUG [RS_CLOSE_REGION-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1732188744906.e186dc33dd4cd397c5a22e8f3fdc3eae. 2024-11-21T11:32:47,677 INFO [RS:0;7b462513bfc2:44605 {}] regionserver.HRegionServer(976): stopping server 7b462513bfc2,44605,1732188729784; all regions closed. 
2024-11-21T11:32:47,677 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:47,678 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:47,678 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:47,678 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:47,678 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:47,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741834_1010 (size=8107) 2024-11-21T11:32:47,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741834_1010 (size=8107) 2024-11-21T11:32:47,682 DEBUG [RS:0;7b462513bfc2:44605 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/oldWALs 2024-11-21T11:32:47,682 INFO [RS:0;7b462513bfc2:44605 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7b462513bfc2%2C44605%2C1732188729784.meta:.meta(num 1732188730527) 2024-11-21T11:32:47,682 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:47,682 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:47,682 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:47,682 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:47,683 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:47,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741877_1053 (size=780) 2024-11-21T11:32:47,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741877_1053 (size=780) 2024-11-21T11:32:47,686 DEBUG [RS:0;7b462513bfc2:44605 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/oldWALs 2024-11-21T11:32:47,686 INFO [RS:0;7b462513bfc2:44605 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7b462513bfc2%2C44605%2C1732188729784:(num 1732188767365) 2024-11-21T11:32:47,686 DEBUG [RS:0;7b462513bfc2:44605 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:32:47,686 INFO [RS:0;7b462513bfc2:44605 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T11:32:47,686 INFO [RS:0;7b462513bfc2:44605 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T11:32:47,686 INFO [RS:0;7b462513bfc2:44605 {}] hbase.ChoreService(370): Chore service for: regionserver/7b462513bfc2:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-21T11:32:47,686 INFO [RS:0;7b462513bfc2:44605 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T11:32:47,686 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-21T11:32:47,687 INFO [RS:0;7b462513bfc2:44605 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44605 2024-11-21T11:32:47,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x1013a4c5c050001, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7b462513bfc2,44605,1732188729784 2024-11-21T11:32:47,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-21T11:32:47,689 INFO [RS:0;7b462513bfc2:44605 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T11:32:47,690 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7b462513bfc2,44605,1732188729784] 2024-11-21T11:32:47,691 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7b462513bfc2,44605,1732188729784 already deleted, retry=false 2024-11-21T11:32:47,691 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7b462513bfc2,44605,1732188729784 expired; onlineServers=0 2024-11-21T11:32:47,691 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7b462513bfc2,44595,1732188729732' ***** 2024-11-21T11:32:47,691 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T11:32:47,692 INFO [M:0;7b462513bfc2:44595 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T11:32:47,692 INFO [M:0;7b462513bfc2:44595 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T11:32:47,692 DEBUG [M:0;7b462513bfc2:44595 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T11:32:47,692 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-21T11:32:47,692 DEBUG [M:0;7b462513bfc2:44595 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T11:32:47,692 DEBUG [master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.small.0-1732188729927 {}] cleaner.HFileCleaner(306): Exit Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.small.0-1732188729927,5,FailOnTimeoutGroup] 2024-11-21T11:32:47,692 DEBUG [master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.large.0-1732188729926 {}] cleaner.HFileCleaner(306): Exit Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.large.0-1732188729926,5,FailOnTimeoutGroup] 2024-11-21T11:32:47,692 INFO [M:0;7b462513bfc2:44595 {}] hbase.ChoreService(370): Chore service for: master/7b462513bfc2:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T11:32:47,692 INFO [M:0;7b462513bfc2:44595 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T11:32:47,692 DEBUG [M:0;7b462513bfc2:44595 {}] master.HMaster(1795): Stopping service threads 2024-11-21T11:32:47,692 INFO [M:0;7b462513bfc2:44595 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T11:32:47,692 INFO [M:0;7b462513bfc2:44595 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T11:32:47,693 INFO [M:0;7b462513bfc2:44595 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T11:32:47,693 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-21T11:32:47,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-21T11:32:47,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:47,693 DEBUG [M:0;7b462513bfc2:44595 {}] zookeeper.ZKUtil(347): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-21T11:32:47,693 WARN [M:0;7b462513bfc2:44595 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-21T11:32:47,694 INFO [M:0;7b462513bfc2:44595 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/.lastflushedseqids 2024-11-21T11:32:47,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741878_1054 (size=228) 2024-11-21T11:32:47,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741878_1054 (size=228) 2024-11-21T11:32:47,699 INFO [M:0;7b462513bfc2:44595 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T11:32:47,700 INFO [M:0;7b462513bfc2:44595 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T11:32:47,700 DEBUG [M:0;7b462513bfc2:44595 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T11:32:47,700 INFO [M:0;7b462513bfc2:44595 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:32:47,700 DEBUG [M:0;7b462513bfc2:44595 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:32:47,700 DEBUG [M:0;7b462513bfc2:44595 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T11:32:47,700 DEBUG [M:0;7b462513bfc2:44595 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:32:47,700 INFO [M:0;7b462513bfc2:44595 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=53.71 KB heapSize=65.93 KB 2024-11-21T11:32:47,715 DEBUG [M:0;7b462513bfc2:44595 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dd0804cb7bbb4ec980d95f4460800d56 is 82, key is hbase:meta,,1/info:regioninfo/1732188730551/Put/seqid=0 2024-11-21T11:32:47,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741879_1055 (size=5672) 2024-11-21T11:32:47,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741879_1055 (size=5672) 2024-11-21T11:32:47,721 INFO [M:0;7b462513bfc2:44595 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dd0804cb7bbb4ec980d95f4460800d56 2024-11-21T11:32:47,740 DEBUG [M:0;7b462513bfc2:44595 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/df4603e8eb974189974d682adf80b356 is 751, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732188730979/Put/seqid=0 2024-11-21T11:32:47,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741880_1056 (size=7681) 2024-11-21T11:32:47,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741880_1056 (size=7681) 2024-11-21T11:32:47,746 INFO [M:0;7b462513bfc2:44595 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.11 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/df4603e8eb974189974d682adf80b356 2024-11-21T11:32:47,750 INFO [M:0;7b462513bfc2:44595 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for df4603e8eb974189974d682adf80b356 2024-11-21T11:32:47,764 DEBUG [M:0;7b462513bfc2:44595 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f6808fccb2044859ab43408de372a912 is 69, key is 7b462513bfc2,44605,1732188729784/rs:state/1732188730017/Put/seqid=0 2024-11-21T11:32:47,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741881_1057 (size=5156) 2024-11-21T11:32:47,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741881_1057 (size=5156) 2024-11-21T11:32:47,770 INFO [M:0;7b462513bfc2:44595 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f6808fccb2044859ab43408de372a912 2024-11-21T11:32:47,787 DEBUG [M:0;7b462513bfc2:44595 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/26b2a3f48aad4fb5ae65683ba8abf03d is 52, key is load_balancer_on/state:d/1732188730608/Put/seqid=0 2024-11-21T11:32:47,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x1013a4c5c050001, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:32:47,790 INFO [RS:0;7b462513bfc2:44605 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T11:32:47,791 INFO [RS:0;7b462513bfc2:44605 {}] regionserver.HRegionServer(1031): Exiting; stopping=7b462513bfc2,44605,1732188729784; zookeeper connection closed. 
2024-11-21T11:32:47,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44605-0x1013a4c5c050001, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:32:47,791 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6ee30db8 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6ee30db8 2024-11-21T11:32:47,791 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T11:32:47,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741882_1058 (size=5056) 2024-11-21T11:32:47,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741882_1058 (size=5056) 2024-11-21T11:32:47,793 INFO [M:0;7b462513bfc2:44595 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/26b2a3f48aad4fb5ae65683ba8abf03d 2024-11-21T11:32:47,797 DEBUG [M:0;7b462513bfc2:44595 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dd0804cb7bbb4ec980d95f4460800d56 as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/dd0804cb7bbb4ec980d95f4460800d56 2024-11-21T11:32:47,801 INFO [M:0;7b462513bfc2:44595 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/dd0804cb7bbb4ec980d95f4460800d56, entries=8, sequenceid=129, filesize=5.5 K 2024-11-21T11:32:47,802 DEBUG [M:0;7b462513bfc2:44595 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/df4603e8eb974189974d682adf80b356 as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/df4603e8eb974189974d682adf80b356 2024-11-21T11:32:47,806 INFO [M:0;7b462513bfc2:44595 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for df4603e8eb974189974d682adf80b356 2024-11-21T11:32:47,806 INFO [M:0;7b462513bfc2:44595 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/df4603e8eb974189974d682adf80b356, entries=14, sequenceid=129, filesize=7.5 K 2024-11-21T11:32:47,806 DEBUG [M:0;7b462513bfc2:44595 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f6808fccb2044859ab43408de372a912 as 
hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f6808fccb2044859ab43408de372a912 2024-11-21T11:32:47,811 INFO [M:0;7b462513bfc2:44595 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f6808fccb2044859ab43408de372a912, entries=1, sequenceid=129, filesize=5.0 K 2024-11-21T11:32:47,811 DEBUG [M:0;7b462513bfc2:44595 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/26b2a3f48aad4fb5ae65683ba8abf03d as hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/26b2a3f48aad4fb5ae65683ba8abf03d 2024-11-21T11:32:47,815 INFO [M:0;7b462513bfc2:44595 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39871/user/jenkins/test-data/2ef891f4-d400-46a5-5c30-1a9e47bc0e41/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/26b2a3f48aad4fb5ae65683ba8abf03d, entries=1, sequenceid=129, filesize=4.9 K 2024-11-21T11:32:47,817 INFO [M:0;7b462513bfc2:44595 {}] regionserver.HRegion(3140): Finished flush of dataSize ~53.71 KB/54997, heapSize ~65.87 KB/67448, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 116ms, sequenceid=129, compaction requested=false 2024-11-21T11:32:47,818 INFO [M:0;7b462513bfc2:44595 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:32:47,818 DEBUG [M:0;7b462513bfc2:44595 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732188767700Disabling compacts and flushes for region at 1732188767700Disabling writes for close at 1732188767700Obtaining lock to block concurrent updates at 1732188767700Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732188767700Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=54997, getHeapSize=67448, getOffHeapSize=0, getCellsCount=152 at 1732188767700Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732188767701 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732188767701Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732188767715 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732188767715Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732188767725 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732188767740 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732188767740Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732188767750 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732188767764 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732188767764Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732188767774 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732188767787 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732188767787Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@511742b0: reopening flushed file at 1732188767797 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16c3a967: reopening flushed file at 1732188767801 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5864866e: reopening flushed file at 1732188767806 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@242719af: reopening flushed file at 1732188767811 (+5 ms)Finished flush of dataSize ~53.71 KB/54997, heapSize ~65.87 KB/67448, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 116ms, sequenceid=129, compaction requested=false at 1732188767817 (+6 ms)Writing region close event to WAL at 1732188767818 (+1 ms)Closed at 1732188767818 2024-11-21T11:32:47,818 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:47,818 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:47,819 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:47,819 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:47,819 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:47,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34627 is added to blk_1073741830_1006 (size=63927) 2024-11-21T11:32:47,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37673 is added to blk_1073741830_1006 (size=63927) 2024-11-21T11:32:47,821 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T11:32:47,821 INFO [M:0;7b462513bfc2:44595 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-21T11:32:47,821 INFO [M:0;7b462513bfc2:44595 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44595 2024-11-21T11:32:47,822 INFO [M:0;7b462513bfc2:44595 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T11:32:47,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:32:47,924 INFO [M:0;7b462513bfc2:44595 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T11:32:47,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44595-0x1013a4c5c050000, quorum=127.0.0.1:49560, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:32:47,927 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5cfa2328{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:32:47,927 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@551592b1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:32:47,927 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:32:47,927 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54fcac{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:32:47,927 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@671b15e6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/hadoop.log.dir/,STOPPED} 2024-11-21T11:32:47,929 WARN [BP-1960743717-172.17.0.2-1732188728960 heartbeating to localhost/127.0.0.1:39871 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T11:32:47,929 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T11:32:47,929 WARN [BP-1960743717-172.17.0.2-1732188728960 heartbeating to localhost/127.0.0.1:39871 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1960743717-172.17.0.2-1732188728960 (Datanode Uuid 8229ecd5-60eb-4920-9abf-c1019806bb2a) service to localhost/127.0.0.1:39871 2024-11-21T11:32:47,929 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T11:32:47,929 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/cluster_76847933-9f96-1e47-8911-efafed2418cd/data/data3/current/BP-1960743717-172.17.0.2-1732188728960 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:32:47,930 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/cluster_76847933-9f96-1e47-8911-efafed2418cd/data/data4/current/BP-1960743717-172.17.0.2-1732188728960 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:32:47,930 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T11:32:47,932 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@34e466bb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:32:47,932 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4181d37d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:32:47,932 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:32:47,932 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@73447fd2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:32:47,932 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b395bdc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/hadoop.log.dir/,STOPPED} 2024-11-21T11:32:47,938 WARN [BP-1960743717-172.17.0.2-1732188728960 heartbeating to localhost/127.0.0.1:39871 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T11:32:47,938 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T11:32:47,938 WARN [BP-1960743717-172.17.0.2-1732188728960 heartbeating to localhost/127.0.0.1:39871 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1960743717-172.17.0.2-1732188728960 (Datanode Uuid a157e785-e79c-4ae9-a358-082b586e2edd) service to localhost/127.0.0.1:39871 2024-11-21T11:32:47,938 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T11:32:47,938 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/cluster_76847933-9f96-1e47-8911-efafed2418cd/data/data1/current/BP-1960743717-172.17.0.2-1732188728960 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:32:47,939 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/cluster_76847933-9f96-1e47-8911-efafed2418cd/data/data2/current/BP-1960743717-172.17.0.2-1732188728960 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:32:47,939 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T11:32:47,945 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@35e03861{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T11:32:47,945 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7142f9c7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:32:47,945 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:32:47,946 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8987cea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:32:47,946 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e8771e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/hadoop.log.dir/,STOPPED} 2024-11-21T11:32:47,952 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-21T11:32:47,979 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-21T11:32:47,988 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=231 (was 206) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:39871 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39871 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:39871 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:39871 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39871 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39871 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39871 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:39871 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=515 (was 483) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=58 (was 63), ProcessCount=11 (was 11), AvailableMemoryMB=4907 (was 4966) 2024-11-21T11:32:47,996 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=231, OpenFileDescriptor=515, MaxFileDescriptor=1048576, SystemLoadAverage=58, ProcessCount=11, AvailableMemoryMB=4907 2024-11-21T11:32:47,996 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T11:32:47,996 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/hadoop.log.dir so I do NOT create it in target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41 2024-11-21T11:32:47,996 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1d0aee53-ad8d-2154-4bf7-fd5fbad61e58/hadoop.tmp.dir so I do NOT create it in target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41 2024-11-21T11:32:47,996 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/cluster_a45dd359-a9ed-264a-f38d-1a947582d854, deleteOnExit=true 2024-11-21T11:32:47,996 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T11:32:47,997 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/test.cache.data in system properties and HBase conf 2024-11-21T11:32:47,997 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T11:32:47,997 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/hadoop.log.dir in system properties and HBase conf 2024-11-21T11:32:47,997 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T11:32:47,997 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-21T11:32:47,997 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T11:32:47,997 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-21T11:32:47,997 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T11:32:47,997 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T11:32:47,997 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T11:32:47,997 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T11:32:47,997 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T11:32:47,997 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T11:32:47,997 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T11:32:47,997 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T11:32:47,997 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T11:32:47,998 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/nfs.dump.dir in system properties and HBase conf 2024-11-21T11:32:47,998 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/java.io.tmpdir in system properties and HBase conf 2024-11-21T11:32:47,998 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T11:32:47,998 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T11:32:47,998 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T11:32:48,011 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-21T11:32:48,030 INFO [regionserver/7b462513bfc2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T11:32:48,068 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:32:48,071 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T11:32:48,072 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T11:32:48,073 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T11:32:48,073 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T11:32:48,073 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:32:48,074 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32d01bcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/hadoop.log.dir/,AVAILABLE} 2024-11-21T11:32:48,074 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@512e80eb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T11:32:48,106 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:48,106 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:48,187 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@54644e01{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/java.io.tmpdir/jetty-localhost-45029-hadoop-hdfs-3_4_1-tests_jar-_-any-14821383998580496217/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T11:32:48,187 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@134d2ab8{HTTP/1.1, (http/1.1)}{localhost:45029} 2024-11-21T11:32:48,187 INFO [Time-limited test {}] server.Server(415): Started @275379ms 2024-11-21T11:32:48,199 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-21T11:32:48,255 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:32:48,257 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T11:32:48,258 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T11:32:48,258 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T11:32:48,258 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T11:32:48,259 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c8a9439{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/hadoop.log.dir/,AVAILABLE} 2024-11-21T11:32:48,259 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e32ebb8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T11:32:48,293 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T11:32:48,293 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-21T11:32:48,294 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-21T11:32:48,294 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-21T11:32:48,373 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@79ca80d8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/java.io.tmpdir/jetty-localhost-43635-hadoop-hdfs-3_4_1-tests_jar-_-any-7703213361550471371/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:32:48,374 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@150dab73{HTTP/1.1, (http/1.1)}{localhost:43635} 2024-11-21T11:32:48,374 INFO [Time-limited test {}] server.Server(415): Started @275566ms 2024-11-21T11:32:48,375 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T11:32:48,404 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T11:32:48,407 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T11:32:48,407 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T11:32:48,407 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T11:32:48,407 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T11:32:48,408 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@814e400{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/hadoop.log.dir/,AVAILABLE} 2024-11-21T11:32:48,408 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7748f5df{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T11:32:48,484 WARN [Thread-2459 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/cluster_a45dd359-a9ed-264a-f38d-1a947582d854/data/data1/current/BP-1095317787-172.17.0.2-1732188768016/current, will proceed with Du for space computation calculation, 2024-11-21T11:32:48,484 WARN [Thread-2460 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/cluster_a45dd359-a9ed-264a-f38d-1a947582d854/data/data2/current/BP-1095317787-172.17.0.2-1732188768016/current, will proceed with Du for space computation calculation, 2024-11-21T11:32:48,505 WARN [Thread-2438 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-21T11:32:48,508 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x348c018d3d13ca4e with lease ID 0xbbe4987a3ab3da44: Processing first storage report for DS-5b36b4c6-85bd-4b9a-a954-83f2263cd478 from datanode DatanodeRegistration(127.0.0.1:43833, datanodeUuid=82c04bef-0e61-4c22-8d12-bba6c6ae16cc, infoPort=44737, infoSecurePort=0, ipcPort=39573, storageInfo=lv=-57;cid=testClusterID;nsid=1340702347;c=1732188768016) 2024-11-21T11:32:48,508 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x348c018d3d13ca4e with lease ID 0xbbe4987a3ab3da44: from storage DS-5b36b4c6-85bd-4b9a-a954-83f2263cd478 node DatanodeRegistration(127.0.0.1:43833, datanodeUuid=82c04bef-0e61-4c22-8d12-bba6c6ae16cc, infoPort=44737, infoSecurePort=0, ipcPort=39573, storageInfo=lv=-57;cid=testClusterID;nsid=1340702347;c=1732188768016), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:32:48,508 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x348c018d3d13ca4e with lease ID 0xbbe4987a3ab3da44: Processing first storage report for DS-0ced8699-cff4-4020-b756-612fccc73ddd from datanode DatanodeRegistration(127.0.0.1:43833, datanodeUuid=82c04bef-0e61-4c22-8d12-bba6c6ae16cc, infoPort=44737, infoSecurePort=0, ipcPort=39573, storageInfo=lv=-57;cid=testClusterID;nsid=1340702347;c=1732188768016) 2024-11-21T11:32:48,508 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x348c018d3d13ca4e with lease ID 0xbbe4987a3ab3da44: from storage DS-0ced8699-cff4-4020-b756-612fccc73ddd node DatanodeRegistration(127.0.0.1:43833, datanodeUuid=82c04bef-0e61-4c22-8d12-bba6c6ae16cc, infoPort=44737, infoSecurePort=0, ipcPort=39573, storageInfo=lv=-57;cid=testClusterID;nsid=1340702347;c=1732188768016), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:32:48,527 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@87b2e2b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/java.io.tmpdir/jetty-localhost-42383-hadoop-hdfs-3_4_1-tests_jar-_-any-9195726726906859180/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:32:48,528 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@380ffe40{HTTP/1.1, (http/1.1)}{localhost:42383} 2024-11-21T11:32:48,528 INFO [Time-limited test {}] server.Server(415): Started @275720ms 2024-11-21T11:32:48,529 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-21T11:32:48,625 WARN [Thread-2485 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/cluster_a45dd359-a9ed-264a-f38d-1a947582d854/data/data3/current/BP-1095317787-172.17.0.2-1732188768016/current, will proceed with Du for space computation calculation, 2024-11-21T11:32:48,625 WARN [Thread-2486 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/cluster_a45dd359-a9ed-264a-f38d-1a947582d854/data/data4/current/BP-1095317787-172.17.0.2-1732188768016/current, will proceed with Du for space computation calculation, 2024-11-21T11:32:48,642 WARN [Thread-2474 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T11:32:48,644 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x18b342637cb04c63 with lease ID 0xbbe4987a3ab3da45: Processing first storage report for DS-941e54bc-5d7e-4110-b03f-e347f5b7d38c from datanode DatanodeRegistration(127.0.0.1:32867, datanodeUuid=3a485a92-c4b4-4957-8ae0-8a93aa42cdde, infoPort=44677, infoSecurePort=0, ipcPort=36773, storageInfo=lv=-57;cid=testClusterID;nsid=1340702347;c=1732188768016) 2024-11-21T11:32:48,644 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x18b342637cb04c63 with lease ID 0xbbe4987a3ab3da45: from storage DS-941e54bc-5d7e-4110-b03f-e347f5b7d38c node DatanodeRegistration(127.0.0.1:32867, datanodeUuid=3a485a92-c4b4-4957-8ae0-8a93aa42cdde, infoPort=44677, infoSecurePort=0, ipcPort=36773, storageInfo=lv=-57;cid=testClusterID;nsid=1340702347;c=1732188768016), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:32:48,644 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x18b342637cb04c63 with lease ID 0xbbe4987a3ab3da45: Processing first storage report for DS-cf1b0430-12c9-4380-926c-ae7f63cb840a from datanode DatanodeRegistration(127.0.0.1:32867, datanodeUuid=3a485a92-c4b4-4957-8ae0-8a93aa42cdde, infoPort=44677, infoSecurePort=0, ipcPort=36773, storageInfo=lv=-57;cid=testClusterID;nsid=1340702347;c=1732188768016) 2024-11-21T11:32:48,644 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x18b342637cb04c63 with lease ID 0xbbe4987a3ab3da45: from storage DS-cf1b0430-12c9-4380-926c-ae7f63cb840a node DatanodeRegistration(127.0.0.1:32867, datanodeUuid=3a485a92-c4b4-4957-8ae0-8a93aa42cdde, infoPort=44677, infoSecurePort=0, ipcPort=36773, storageInfo=lv=-57;cid=testClusterID;nsid=1340702347;c=1732188768016), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T11:32:48,651 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41 2024-11-21T11:32:48,653 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/cluster_a45dd359-a9ed-264a-f38d-1a947582d854/zookeeper_0, clientPort=52079, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/cluster_a45dd359-a9ed-264a-f38d-1a947582d854/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/cluster_a45dd359-a9ed-264a-f38d-1a947582d854/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-21T11:32:48,654 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52079 2024-11-21T11:32:48,654 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:32:48,655 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:32:48,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43833 is added to blk_1073741825_1001 (size=7) 2024-11-21T11:32:48,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741825_1001 (size=7) 2024-11-21T11:32:48,664 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17 with version=8 2024-11-21T11:32:48,664 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36571/user/jenkins/test-data/89c7b4f5-f4c6-b0d4-bd50-3904279eacaa/hbase-staging 2024-11-21T11:32:48,666 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7b462513bfc2:0 server-side Connection retries=45 2024-11-21T11:32:48,667 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T11:32:48,667 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T11:32:48,667 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T11:32:48,667 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T11:32:48,667 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T11:32:48,667 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T11:32:48,667 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T11:32:48,668 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40089 2024-11-21T11:32:48,669 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40089 connecting to ZooKeeper ensemble=127.0.0.1:52079 2024-11-21T11:32:48,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:400890x0, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T11:32:48,674 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40089-0x1013a4cf41c0000 connected 2024-11-21T11:32:48,687 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:32:48,688 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:32:48,690 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:32:48,690 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17, hbase.cluster.distributed=false 2024-11-21T11:32:48,692 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-21T11:32:48,693 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40089 2024-11-21T11:32:48,693 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40089 2024-11-21T11:32:48,693 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40089 2024-11-21T11:32:48,694 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40089 2024-11-21T11:32:48,694 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40089 2024-11-21T11:32:48,709 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7b462513bfc2:0 server-side Connection retries=45 2024-11-21T11:32:48,709 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T11:32:48,709 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T11:32:48,709 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T11:32:48,709 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T11:32:48,709 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T11:32:48,709 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T11:32:48,709 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T11:32:48,710 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39997 2024-11-21T11:32:48,711 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39997 connecting to ZooKeeper ensemble=127.0.0.1:52079 2024-11-21T11:32:48,711 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:32:48,713 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:32:48,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:399970x0, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T11:32:48,717 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:399970x0, quorum=127.0.0.1:52079, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:32:48,717 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39997-0x1013a4cf41c0001 connected 2024-11-21T11:32:48,717 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T11:32:48,717 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T11:32:48,718 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39997-0x1013a4cf41c0001, quorum=127.0.0.1:52079, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-21T11:32:48,719 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39997-0x1013a4cf41c0001, quorum=127.0.0.1:52079, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-21T11:32:48,719 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39997 2024-11-21T11:32:48,719 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39997 2024-11-21T11:32:48,719 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39997 2024-11-21T11:32:48,720 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39997 2024-11-21T11:32:48,720 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39997 2024-11-21T11:32:48,731 
DEBUG [M:0;7b462513bfc2:40089 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7b462513bfc2:40089 2024-11-21T11:32:48,732 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7b462513bfc2,40089,1732188768666 2024-11-21T11:32:48,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T11:32:48,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39997-0x1013a4cf41c0001, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T11:32:48,734 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7b462513bfc2,40089,1732188768666 2024-11-21T11:32:48,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39997-0x1013a4cf41c0001, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-21T11:32:48,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:48,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39997-0x1013a4cf41c0001, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:48,742 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-21T11:32:48,742 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7b462513bfc2,40089,1732188768666 from backup master directory 2024-11-21T11:32:48,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7b462513bfc2,40089,1732188768666 2024-11-21T11:32:48,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39997-0x1013a4cf41c0001, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T11:32:48,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T11:32:48,744 WARN [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-21T11:32:48,744 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7b462513bfc2,40089,1732188768666 2024-11-21T11:32:48,748 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/hbase.id] with ID: 9ece5fe1-6a85-4a48-a793-27f1f96f785b 2024-11-21T11:32:48,748 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/.tmp/hbase.id 2024-11-21T11:32:48,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741826_1002 (size=42) 2024-11-21T11:32:48,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43833 is added to blk_1073741826_1002 (size=42) 2024-11-21T11:32:48,754 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/.tmp/hbase.id]:[hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/hbase.id] 2024-11-21T11:32:48,765 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:32:48,765 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T11:32:48,766 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-21T11:32:48,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39997-0x1013a4cf41c0001, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:48,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:48,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43833 is added to blk_1073741827_1003 (size=196) 2024-11-21T11:32:48,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741827_1003 (size=196) 2024-11-21T11:32:48,774 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T11:32:48,775 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T11:32:48,775 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-21T11:32:48,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741828_1004 (size=1189) 2024-11-21T11:32:48,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43833 is added to blk_1073741828_1004 (size=1189) 2024-11-21T11:32:48,782 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/data/master/store 2024-11-21T11:32:48,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43833 is added to blk_1073741829_1005 (size=34) 2024-11-21T11:32:48,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741829_1005 (size=34) 2024-11-21T11:32:48,788 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:32:48,788 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T11:32:48,788 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:32:48,788 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:32:48,788 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T11:32:48,788 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:32:48,788 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-21T11:32:48,788 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732188768788Disabling compacts and flushes for region at 1732188768788Disabling writes for close at 1732188768788Writing region close event to WAL at 1732188768788Closed at 1732188768788 2024-11-21T11:32:48,789 WARN [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/data/master/store/.initializing 2024-11-21T11:32:48,789 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/WALs/7b462513bfc2,40089,1732188768666 2024-11-21T11:32:48,791 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b462513bfc2%2C40089%2C1732188768666, suffix=, logDir=hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/WALs/7b462513bfc2,40089,1732188768666, archiveDir=hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/oldWALs, maxLogs=10 2024-11-21T11:32:48,791 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C40089%2C1732188768666.1732188768791 2024-11-21T11:32:48,795 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/WALs/7b462513bfc2,40089,1732188768666/7b462513bfc2%2C40089%2C1732188768666.1732188768791 2024-11-21T11:32:48,796 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44737:44737),(127.0.0.1/127.0.0.1:44677:44677)] 2024-11-21T11:32:48,797 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T11:32:48,797 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:32:48,797 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:32:48,797 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:32:48,798 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:32:48,799 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T11:32:48,799 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:48,800 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:32:48,800 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:32:48,801 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-21T11:32:48,801 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:48,801 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:32:48,801 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:32:48,802 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T11:32:48,802 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:48,802 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:32:48,802 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:32:48,803 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T11:32:48,803 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:48,804 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T11:32:48,804 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:32:48,804 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:32:48,804 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:32:48,805 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:32:48,805 DEBUG [master/7b462513bfc2:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:32:48,806 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T11:32:48,806 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T11:32:48,808 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T11:32:48,808 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=862115, jitterRate=0.09623649716377258}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T11:32:48,809 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732188768797Initializing all the Stores at 1732188768798 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188768798Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188768798Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188768798Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188768798Cleaning up temporary data from old regions at 1732188768805 (+7 ms)Region opened successfully at 1732188768809 (+4 ms) 2024-11-21T11:32:48,809 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T11:32:48,811 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34ae6076, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7b462513bfc2/172.17.0.2:0 2024-11-21T11:32:48,812 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-21T11:32:48,812 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T11:32:48,812 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T11:32:48,813 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T11:32:48,813 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-21T11:32:48,813 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T11:32:48,813 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T11:32:48,815 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-21T11:32:48,816 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-21T11:32:48,817 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-21T11:32:48,817 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T11:32:48,818 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-21T11:32:48,820 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-21T11:32:48,820 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T11:32:48,821 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-21T11:32:48,822 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-21T11:32:48,823 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-21T11:32:48,824 DEBUG 
[master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-21T11:32:48,825 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T11:32:48,826 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-21T11:32:48,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39997-0x1013a4cf41c0001, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-21T11:32:48,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-21T11:32:48,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39997-0x1013a4cf41c0001, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:48,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:48,834 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7b462513bfc2,40089,1732188768666, sessionid=0x1013a4cf41c0000, setting cluster-up flag (Was=false) 2024-11-21T11:32:48,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:48,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39997-0x1013a4cf41c0001, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:48,841 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-21T11:32:48,842 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7b462513bfc2,40089,1732188768666 2024-11-21T11:32:48,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39997-0x1013a4cf41c0001, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:48,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:48,850 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-21T11:32:48,850 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7b462513bfc2,40089,1732188768666 2024-11-21T11:32:48,851 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T11:32:48,853 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T11:32:48,853 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T11:32:48,853 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-21T11:32:48,853 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7b462513bfc2,40089,1732188768666 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T11:32:48,854 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:32:48,855 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:32:48,855 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:32:48,855 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7b462513bfc2:0, corePoolSize=5, maxPoolSize=5 2024-11-21T11:32:48,855 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7b462513bfc2:0, corePoolSize=10, maxPoolSize=10 2024-11-21T11:32:48,855 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:32:48,855 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7b462513bfc2:0, corePoolSize=2, maxPoolSize=2 2024-11-21T11:32:48,855 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7b462513bfc2:0, corePoolSize=1, 
maxPoolSize=1 2024-11-21T11:32:48,856 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732188798856 2024-11-21T11:32:48,856 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T11:32:48,856 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T11:32:48,856 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T11:32:48,856 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T11:32:48,856 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T11:32:48,856 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T11:32:48,856 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T11:32:48,856 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T11:32:48,857 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:48,857 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T11:32:48,857 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T11:32:48,857 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T11:32:48,857 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T11:32:48,857 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T11:32:48,857 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:48,857 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.large.0-1732188768857,5,FailOnTimeoutGroup] 2024-11-21T11:32:48,858 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.small.0-1732188768858,5,FailOnTimeoutGroup] 2024-11-21T11:32:48,858 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-21T11:32:48,858 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T11:32:48,858 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:48,858 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:48,858 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T11:32:48,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741831_1007 (size=1321) 2024-11-21T11:32:48,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43833 is added to blk_1073741831_1007 (size=1321) 2024-11-21T11:32:48,866 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T11:32:48,866 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17 2024-11-21T11:32:48,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43833 is added to blk_1073741832_1008 (size=32) 2024-11-21T11:32:48,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741832_1008 (size=32) 2024-11-21T11:32:48,872 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:32:48,873 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T11:32:48,874 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T11:32:48,874 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:48,874 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:32:48,874 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 
1588230740 2024-11-21T11:32:48,875 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T11:32:48,875 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:48,876 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:32:48,876 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T11:32:48,877 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T11:32:48,877 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:48,877 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:32:48,877 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T11:32:48,878 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T11:32:48,878 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:48,878 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:32:48,878 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T11:32:48,879 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/data/hbase/meta/1588230740 2024-11-21T11:32:48,879 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/data/hbase/meta/1588230740 2024-11-21T11:32:48,880 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T11:32:48,880 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T11:32:48,880 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-21T11:32:48,881 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T11:32:48,883 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T11:32:48,883 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=729890, jitterRate=-0.07189737260341644}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-21T11:32:48,884 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732188768872Initializing all the Stores at 1732188768873 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188768873Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188768873Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188768873Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188768873Cleaning up temporary data from old regions at 1732188768880 (+7 ms)Region opened successfully at 1732188768883 (+3 ms) 2024-11-21T11:32:48,884 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T11:32:48,884 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T11:32:48,884 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T11:32:48,884 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T11:32:48,884 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T11:32:48,884 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T11:32:48,884 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732188768884Disabling compacts and flushes for region at 1732188768884Disabling writes for close at 1732188768884Writing region close 
event to WAL at 1732188768884Closed at 1732188768884 2024-11-21T11:32:48,885 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T11:32:48,885 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T11:32:48,885 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T11:32:48,886 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T11:32:48,887 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T11:32:48,922 INFO [RS:0;7b462513bfc2:39997 {}] regionserver.HRegionServer(746): ClusterId : 9ece5fe1-6a85-4a48-a793-27f1f96f785b 2024-11-21T11:32:48,922 DEBUG [RS:0;7b462513bfc2:39997 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T11:32:48,924 DEBUG [RS:0;7b462513bfc2:39997 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T11:32:48,924 DEBUG [RS:0;7b462513bfc2:39997 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T11:32:48,927 DEBUG [RS:0;7b462513bfc2:39997 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T11:32:48,927 DEBUG [RS:0;7b462513bfc2:39997 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6290443f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7b462513bfc2/172.17.0.2:0 2024-11-21T11:32:48,939 DEBUG [RS:0;7b462513bfc2:39997 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7b462513bfc2:39997 2024-11-21T11:32:48,939 INFO [RS:0;7b462513bfc2:39997 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T11:32:48,939 INFO [RS:0;7b462513bfc2:39997 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T11:32:48,939 DEBUG [RS:0;7b462513bfc2:39997 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-21T11:32:48,940 INFO [RS:0;7b462513bfc2:39997 {}] regionserver.HRegionServer(2659): reportForDuty to master=7b462513bfc2,40089,1732188768666 with port=39997, startcode=1732188768708 2024-11-21T11:32:48,940 DEBUG [RS:0;7b462513bfc2:39997 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T11:32:48,942 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33279, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T11:32:48,942 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40089 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7b462513bfc2,39997,1732188768708 2024-11-21T11:32:48,943 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40089 {}] master.ServerManager(517): Registering regionserver=7b462513bfc2,39997,1732188768708 2024-11-21T11:32:48,944 DEBUG [RS:0;7b462513bfc2:39997 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17 2024-11-21T11:32:48,944 DEBUG [RS:0;7b462513bfc2:39997 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43373 2024-11-21T11:32:48,944 DEBUG [RS:0;7b462513bfc2:39997 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T11:32:48,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-21T11:32:48,946 DEBUG [RS:0;7b462513bfc2:39997 {}] zookeeper.ZKUtil(111): regionserver:39997-0x1013a4cf41c0001, quorum=127.0.0.1:52079, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7b462513bfc2,39997,1732188768708 2024-11-21T11:32:48,946 WARN [RS:0;7b462513bfc2:39997 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T11:32:48,946 INFO [RS:0;7b462513bfc2:39997 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-21T11:32:48,946 DEBUG [RS:0;7b462513bfc2:39997 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/WALs/7b462513bfc2,39997,1732188768708 2024-11-21T11:32:48,946 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7b462513bfc2,39997,1732188768708] 2024-11-21T11:32:48,949 INFO [RS:0;7b462513bfc2:39997 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T11:32:48,951 INFO [RS:0;7b462513bfc2:39997 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T11:32:48,951 INFO [RS:0;7b462513bfc2:39997 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T11:32:48,951 INFO [RS:0;7b462513bfc2:39997 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
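Note on the registration sequence above: the region server announces itself by creating an ephemeral znode under /hbase/rs, which the master's RegionServerTracker watches (hence the NodeChildrenChanged event and the "RegionServer ephemeral node created" entry). A minimal stand-alone ZooKeeper client sketch that lists those registered servers; the quorum address is copied from the log, everything else is illustrative:

import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher.Event.KeeperState;
import org.apache.zookeeper.ZooKeeper;

// Illustrative only: lists the ephemeral region-server znodes under /hbase/rs,
// the same nodes the master's RegionServerTracker is watching in the log above.
public class ListRegionServerZNodes {
    public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:52079", 30_000, event -> {
            if (event.getState() == KeeperState.SyncConnected) {
                connected.countDown();
            }
        });
        connected.await();
        // Each child is named host,port,startcode (e.g. 7b462513bfc2,39997,1732188768708)
        // and disappears automatically when the owning server's ZooKeeper session ends.
        List<String> servers = zk.getChildren("/hbase/rs", false);
        servers.forEach(System.out::println);
        zk.close();
    }
}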
2024-11-21T11:32:48,952 INFO [RS:0;7b462513bfc2:39997 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T11:32:48,953 INFO [RS:0;7b462513bfc2:39997 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T11:32:48,953 INFO [RS:0;7b462513bfc2:39997 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:48,953 DEBUG [RS:0;7b462513bfc2:39997 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:32:48,953 DEBUG [RS:0;7b462513bfc2:39997 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:32:48,953 DEBUG [RS:0;7b462513bfc2:39997 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:32:48,953 DEBUG [RS:0;7b462513bfc2:39997 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:32:48,953 DEBUG [RS:0;7b462513bfc2:39997 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:32:48,953 DEBUG [RS:0;7b462513bfc2:39997 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7b462513bfc2:0, corePoolSize=2, maxPoolSize=2 2024-11-21T11:32:48,953 DEBUG [RS:0;7b462513bfc2:39997 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:32:48,953 DEBUG [RS:0;7b462513bfc2:39997 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:32:48,953 DEBUG [RS:0;7b462513bfc2:39997 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:32:48,953 DEBUG [RS:0;7b462513bfc2:39997 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:32:48,953 DEBUG [RS:0;7b462513bfc2:39997 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:32:48,953 DEBUG [RS:0;7b462513bfc2:39997 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7b462513bfc2:0, corePoolSize=1, maxPoolSize=1 2024-11-21T11:32:48,953 DEBUG [RS:0;7b462513bfc2:39997 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7b462513bfc2:0, corePoolSize=3, maxPoolSize=3 2024-11-21T11:32:48,953 DEBUG [RS:0;7b462513bfc2:39997 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7b462513bfc2:0, corePoolSize=3, maxPoolSize=3 2024-11-21T11:32:48,954 INFO [RS:0;7b462513bfc2:39997 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
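Note on the ExecutorService(95) entries above: the region server starts one named, bounded thread pool per event type (RS_OPEN_REGION, RS_CLOSE_META, RS_FLUSH_OPERATIONS, and so on), each with its own corePoolSize/maxPoolSize. As a rough plain-Java analogy of that pattern, not the HBase executor API itself:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

// Rough analogy for the per-event-type pools logged above (e.g. RS_OPEN_REGION with
// corePoolSize=1, maxPoolSize=1): a small, named, bounded thread pool.
public class NamedBoundedPool {
    public static ThreadPoolExecutor create(String name, int coreSize, int maxSize) {
        AtomicInteger counter = new AtomicInteger();
        return new ThreadPoolExecutor(coreSize, maxSize, 60, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>(),
            runnable -> new Thread(runnable, name + "-" + counter.incrementAndGet()));
    }

    public static void main(String[] args) throws Exception {
        ThreadPoolExecutor openRegionPool = create("RS_OPEN_REGION", 1, 1);
        openRegionPool.submit(() -> System.out.println(Thread.currentThread().getName()));
        openRegionPool.shutdown();
        openRegionPool.awaitTermination(5, TimeUnit.SECONDS);
    }
}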
2024-11-21T11:32:48,954 INFO [RS:0;7b462513bfc2:39997 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:48,954 INFO [RS:0;7b462513bfc2:39997 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:48,954 INFO [RS:0;7b462513bfc2:39997 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:48,954 INFO [RS:0;7b462513bfc2:39997 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:48,954 INFO [RS:0;7b462513bfc2:39997 {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,39997,1732188768708-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T11:32:48,968 INFO [RS:0;7b462513bfc2:39997 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T11:32:48,968 INFO [RS:0;7b462513bfc2:39997 {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,39997,1732188768708-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:48,968 INFO [RS:0;7b462513bfc2:39997 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:48,968 INFO [RS:0;7b462513bfc2:39997 {}] regionserver.Replication(171): 7b462513bfc2,39997,1732188768708 started 2024-11-21T11:32:48,982 INFO [RS:0;7b462513bfc2:39997 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:48,982 INFO [RS:0;7b462513bfc2:39997 {}] regionserver.HRegionServer(1482): Serving as 7b462513bfc2,39997,1732188768708, RpcServer on 7b462513bfc2/172.17.0.2:39997, sessionid=0x1013a4cf41c0001 2024-11-21T11:32:48,982 DEBUG [RS:0;7b462513bfc2:39997 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T11:32:48,982 DEBUG [RS:0;7b462513bfc2:39997 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7b462513bfc2,39997,1732188768708 2024-11-21T11:32:48,982 DEBUG [RS:0;7b462513bfc2:39997 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7b462513bfc2,39997,1732188768708' 2024-11-21T11:32:48,982 DEBUG [RS:0;7b462513bfc2:39997 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-21T11:32:48,983 DEBUG [RS:0;7b462513bfc2:39997 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-21T11:32:48,983 DEBUG [RS:0;7b462513bfc2:39997 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T11:32:48,983 DEBUG [RS:0;7b462513bfc2:39997 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T11:32:48,983 DEBUG [RS:0;7b462513bfc2:39997 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7b462513bfc2,39997,1732188768708 2024-11-21T11:32:48,983 DEBUG [RS:0;7b462513bfc2:39997 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7b462513bfc2,39997,1732188768708' 2024-11-21T11:32:48,983 DEBUG [RS:0;7b462513bfc2:39997 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-21T11:32:48,984 DEBUG 
[RS:0;7b462513bfc2:39997 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-21T11:32:48,984 DEBUG [RS:0;7b462513bfc2:39997 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T11:32:48,984 INFO [RS:0;7b462513bfc2:39997 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T11:32:48,984 INFO [RS:0;7b462513bfc2:39997 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T11:32:49,038 WARN [7b462513bfc2:40089 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-21T11:32:49,086 INFO [RS:0;7b462513bfc2:39997 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b462513bfc2%2C39997%2C1732188768708, suffix=, logDir=hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/WALs/7b462513bfc2,39997,1732188768708, archiveDir=hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/oldWALs, maxLogs=32 2024-11-21T11:32:49,086 INFO [RS:0;7b462513bfc2:39997 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C39997%2C1732188768708.1732188769086 2024-11-21T11:32:49,092 INFO [RS:0;7b462513bfc2:39997 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/WALs/7b462513bfc2,39997,1732188768708/7b462513bfc2%2C39997%2C1732188768708.1732188769086 2024-11-21T11:32:49,095 DEBUG [RS:0;7b462513bfc2:39997 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44737:44737),(127.0.0.1/127.0.0.1:44677:44677)] 2024-11-21T11:32:49,106 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,39231,1732188598648/7b462513bfc2%2C39231%2C1732188598648.1732188598878 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-21T11:32:49,106 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37309/user/jenkins/test-data/f177e63e-14e1-4466-f682-513fd4e16647/WALs/7b462513bfc2,40563,1732188597662/7b462513bfc2%2C40563%2C1732188597662.meta.1732188598486.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-21T11:32:49,288 DEBUG [7b462513bfc2:40089 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T11:32:49,288 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7b462513bfc2,39997,1732188768708 2024-11-21T11:32:49,290 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7b462513bfc2,39997,1732188768708, state=OPENING 2024-11-21T11:32:49,291 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T11:32:49,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:49,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39997-0x1013a4cf41c0001, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:49,293 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:32:49,293 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T11:32:49,293 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:32:49,293 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7b462513bfc2,39997,1732188768708}] 2024-11-21T11:32:49,446 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T11:32:49,448 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55683, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T11:32:49,451 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T11:32:49,451 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-21T11:32:49,452 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b462513bfc2%2C39997%2C1732188768708.meta, suffix=.meta, logDir=hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/WALs/7b462513bfc2,39997,1732188768708, archiveDir=hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/oldWALs, maxLogs=32 2024-11-21T11:32:49,453 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7b462513bfc2%2C39997%2C1732188768708.meta.1732188769453.meta 2024-11-21T11:32:49,459 INFO 
[RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/WALs/7b462513bfc2,39997,1732188768708/7b462513bfc2%2C39997%2C1732188768708.meta.1732188769453.meta 2024-11-21T11:32:49,464 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44737:44737),(127.0.0.1/127.0.0.1:44677:44677)] 2024-11-21T11:32:49,466 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T11:32:49,467 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T11:32:49,467 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T11:32:49,467 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-21T11:32:49,467 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T11:32:49,467 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T11:32:49,467 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T11:32:49,467 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T11:32:49,468 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T11:32:49,469 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T11:32:49,469 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:49,470 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:32:49,470 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T11:32:49,470 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T11:32:49,470 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:49,470 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:32:49,471 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T11:32:49,471 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T11:32:49,471 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:49,471 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:32:49,471 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T11:32:49,472 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T11:32:49,472 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T11:32:49,472 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T11:32:49,473 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T11:32:49,473 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/data/hbase/meta/1588230740 2024-11-21T11:32:49,474 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/data/hbase/meta/1588230740 2024-11-21T11:32:49,475 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T11:32:49,475 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T11:32:49,475 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
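Note on the CompactionConfiguration(183) entries above: every column family of the meta region is opened with the same compaction defaults (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0). A hedged sketch of how those knobs are typically set through the Hadoop Configuration; the property names below are the commonly documented ones and should be verified against the HBase release in use (3.0.0-beta-2 here):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: the values mirror what CompactionConfiguration(183) logs above;
// double-check the exact property names against the running HBase version.
public class CompactionTuningSketch {
    public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);    // minFilesToCompact in the log
        conf.setInt("hbase.hstore.compaction.max", 10);   // maxFilesToCompact in the log
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        return conf;
    }

    public static void main(String[] args) {
        Configuration conf = build();
        System.out.println(conf.get("hbase.hstore.compaction.ratio"));
    }
}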
2024-11-21T11:32:49,476 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T11:32:49,477 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=695918, jitterRate=-0.11509472131729126}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-21T11:32:49,477 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T11:32:49,478 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732188769467Writing region info on filesystem at 1732188769467Initializing all the Stores at 1732188769468 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188769468Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188769468Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732188769468Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732188769468Cleaning up temporary data from old regions at 1732188769475 (+7 ms)Running coprocessor post-open hooks at 1732188769477 (+2 ms)Region opened successfully at 1732188769478 (+1 ms) 2024-11-21T11:32:49,479 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732188769445 2024-11-21T11:32:49,481 DEBUG [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T11:32:49,481 INFO [RS_OPEN_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T11:32:49,481 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=7b462513bfc2,39997,1732188768708 2024-11-21T11:32:49,482 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7b462513bfc2,39997,1732188768708, state=OPEN 2024-11-21T11:32:49,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39997-0x1013a4cf41c0001, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-21T11:32:49,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-21T11:32:49,487 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7b462513bfc2,39997,1732188768708 2024-11-21T11:32:49,487 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:32:49,487 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T11:32:49,489 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T11:32:49,489 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7b462513bfc2,39997,1732188768708 in 194 msec 2024-11-21T11:32:49,491 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T11:32:49,491 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 604 msec 2024-11-21T11:32:49,492 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T11:32:49,492 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T11:32:49,493 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T11:32:49,493 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7b462513bfc2,39997,1732188768708, seqNum=-1] 2024-11-21T11:32:49,493 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T11:32:49,495 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37065, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T11:32:49,499 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 646 msec 2024-11-21T11:32:49,499 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732188769499, completionTime=-1 2024-11-21T11:32:49,499 INFO 
[master/7b462513bfc2:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T11:32:49,499 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-21T11:32:49,505 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-21T11:32:49,505 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732188829505 2024-11-21T11:32:49,505 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732188889505 2024-11-21T11:32:49,505 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 5 msec 2024-11-21T11:32:49,505 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,40089,1732188768666-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:49,505 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,40089,1732188768666-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:49,506 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,40089,1732188768666-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:49,506 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7b462513bfc2:40089, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:49,506 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:49,507 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:49,508 DEBUG [master/7b462513bfc2:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T11:32:49,510 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.766sec 2024-11-21T11:32:49,510 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T11:32:49,510 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T11:32:49,510 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T11:32:49,510 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
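Note on the ChoreService(168) entries above: once initialization completes, the master schedules its periodic background tasks (ClusterStatusChore and BalancerChore at 300000 ms, CatalogJanitor, HbckChore, and so on). As a plain-Java illustration of that periodic-chore pattern, not the HBase ChoreService API:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Plain-Java illustration of the periodic "chore" pattern in the log above
// (e.g. BalancerChore with period=300000 ms); this is not the HBase ChoreService API.
public class ChorePatternSketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor();
        chores.scheduleAtFixedRate(
            () -> System.out.println("balancer chore tick"),
            0, 300_000, TimeUnit.MILLISECONDS);
        TimeUnit.SECONDS.sleep(1); // let the first tick run, then shut down
        chores.shutdownNow();
    }
}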
2024-11-21T11:32:49,510 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T11:32:49,510 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,40089,1732188768666-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T11:32:49,510 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,40089,1732188768666-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T11:32:49,512 DEBUG [master/7b462513bfc2:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T11:32:49,512 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T11:32:49,513 INFO [master/7b462513bfc2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b462513bfc2,40089,1732188768666-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T11:32:49,522 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@477d322e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T11:32:49,522 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7b462513bfc2,40089,-1 for getting cluster id 2024-11-21T11:32:49,523 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T11:32:49,524 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9ece5fe1-6a85-4a48-a793-27f1f96f785b' 2024-11-21T11:32:49,524 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T11:32:49,525 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9ece5fe1-6a85-4a48-a793-27f1f96f785b" 2024-11-21T11:32:49,525 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3429cfaf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T11:32:49,525 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7b462513bfc2,40089,-1] 2024-11-21T11:32:49,525 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T11:32:49,525 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:32:49,526 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38198, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T11:32:49,527 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@403020f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T11:32:49,527 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T11:32:49,528 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7b462513bfc2,39997,1732188768708, seqNum=-1] 2024-11-21T11:32:49,529 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T11:32:49,529 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47652, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T11:32:49,531 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7b462513bfc2,40089,1732188768666 2024-11-21T11:32:49,531 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T11:32:49,533 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-21T11:32:49,533 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-21T11:32:49,535 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/WALs/test.com,8080,1, archiveDir=hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/oldWALs, maxLogs=32 2024-11-21T11:32:49,536 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732188769535 2024-11-21T11:32:49,540 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/WALs/test.com,8080,1/test.com%2C8080%2C1.1732188769535 2024-11-21T11:32:49,545 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44737:44737),(127.0.0.1/127.0.0.1:44677:44677)] 2024-11-21T11:32:49,545 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732188769545 2024-11-21T11:32:49,551 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:49,551 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:49,551 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:49,551 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:49,551 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:49,552 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/WALs/test.com,8080,1/test.com%2C8080%2C1.1732188769535 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/WALs/test.com,8080,1/test.com%2C8080%2C1.1732188769545 2024-11-21T11:32:49,554 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44737:44737),(127.0.0.1/127.0.0.1:44677:44677)] 2024-11-21T11:32:49,554 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/WALs/test.com,8080,1/test.com%2C8080%2C1.1732188769535 is not closed yet, will try archiving it next time 2024-11-21T11:32:49,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741835_1011 (size=93) 2024-11-21T11:32:49,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43833 is added to blk_1073741835_1011 (size=93) 2024-11-21T11:32:49,557 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:49,557 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:49,557 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:49,557 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:49,557 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:49,559 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/WALs/test.com,8080,1/test.com%2C8080%2C1.1732188769535 to hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/oldWALs/test.com%2C8080%2C1.1732188769535 2024-11-21T11:32:49,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741836_1012 (size=93) 2024-11-21T11:32:49,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43833 is added to blk_1073741836_1012 (size=93) 2024-11-21T11:32:49,565 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/oldWALs 2024-11-21T11:32:49,565 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1732188769545) 2024-11-21T11:32:49,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T11:32:49,565 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
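Note on the client-side entries above: before exercising the test WAL, the test opens a cluster connection and disables the balancer, which the master records as "Client=null/null set balanceSwitch=false". A minimal sketch of those two client steps against this test's quorum; the addresses are copied from the log and should be adjusted for any real cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of the client-side steps visible above: connect to the mini cluster's
// ZooKeeper quorum and switch the balancer off, as "set balanceSwitch=false" records.
public class DisableBalancerSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "52079"); // quorum port from this log
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            boolean previous = admin.balancerSwitch(false, true); // turn off, wait for completion
            System.out.println("balancer was previously " + (previous ? "on" : "off"));
        }
    }
}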
2024-11-21T11:32:49,566 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T11:32:49,566 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:32:49,566 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:32:49,566 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
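Note on the call stack above: the connection close is triggered by AbstractTestLogRolling.tearDown shutting the mini cluster down through HBaseTestingUtil.shutdownMiniCluster. A minimal JUnit 4 sketch of that test lifecycle, assuming the start/stop methods implied by the trace; names not present in the trace are illustrative:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

// Minimal sketch of the lifecycle behind the shutdown stack trace above:
// bring a mini cluster up before each test and always shut it down afterwards.
public class MiniClusterLifecycleSketch {
    private final HBaseTestingUtil util = new HBaseTestingUtil();

    @Before
    public void setUp() throws Exception {
        util.startMiniCluster(); // starts ZooKeeper, HDFS, a master and one region server
    }

    @After
    public void tearDown() throws Exception {
        util.shutdownMiniCluster(); // mirrors HBaseTestingUtil.shutdownMiniCluster in the trace
    }

    @Test
    public void clusterComesUp() throws Exception {
        // test code runs against the mini cluster here
    }
}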
2024-11-21T11:32:49,566 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T11:32:49,566 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=445687222, stopped=false 2024-11-21T11:32:49,566 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7b462513bfc2,40089,1732188768666 2024-11-21T11:32:49,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39997-0x1013a4cf41c0001, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-21T11:32:49,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-21T11:32:49,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39997-0x1013a4cf41c0001, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:49,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:49,568 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T11:32:49,568 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T11:32:49,568 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T11:32:49,568 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:32:49,568 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7b462513bfc2,39997,1732188768708' ***** 2024-11-21T11:32:49,568 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T11:32:49,568 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39997-0x1013a4cf41c0001, quorum=127.0.0.1:52079, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:32:49,568 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T11:32:49,568 INFO [RS:0;7b462513bfc2:39997 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T11:32:49,568 INFO [RS:0;7b462513bfc2:39997 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T11:32:49,568 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T11:32:49,568 INFO [RS:0;7b462513bfc2:39997 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T11:32:49,569 INFO [RS:0;7b462513bfc2:39997 {}] regionserver.HRegionServer(959): stopping server 7b462513bfc2,39997,1732188768708 2024-11-21T11:32:49,569 INFO [RS:0;7b462513bfc2:39997 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T11:32:49,569 INFO [RS:0;7b462513bfc2:39997 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7b462513bfc2:39997. 
2024-11-21T11:32:49,569 DEBUG [RS:0;7b462513bfc2:39997 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T11:32:49,569 DEBUG [RS:0;7b462513bfc2:39997 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:32:49,569 INFO [RS:0;7b462513bfc2:39997 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T11:32:49,569 INFO [RS:0;7b462513bfc2:39997 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T11:32:49,569 INFO [RS:0;7b462513bfc2:39997 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-21T11:32:49,569 INFO [RS:0;7b462513bfc2:39997 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T11:32:49,569 INFO [RS:0;7b462513bfc2:39997 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-21T11:32:49,569 DEBUG [RS:0;7b462513bfc2:39997 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-21T11:32:49,569 DEBUG [RS:0;7b462513bfc2:39997 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T11:32:49,569 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T11:32:49,569 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T11:32:49,569 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T11:32:49,569 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T11:32:49,569 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T11:32:49,570 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-21T11:32:49,585 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/data/hbase/meta/1588230740/.tmp/ns/3c5a340ed52347e9b0d8238fbbe259ee is 43, key is default/ns:d/1732188769495/Put/seqid=0 2024-11-21T11:32:49,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741837_1013 (size=5153) 2024-11-21T11:32:49,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43833 is added to blk_1073741837_1013 (size=5153) 2024-11-21T11:32:49,591 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/data/hbase/meta/1588230740/.tmp/ns/3c5a340ed52347e9b0d8238fbbe259ee 2024-11-21T11:32:49,596 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/data/hbase/meta/1588230740/.tmp/ns/3c5a340ed52347e9b0d8238fbbe259ee as hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/data/hbase/meta/1588230740/ns/3c5a340ed52347e9b0d8238fbbe259ee 2024-11-21T11:32:49,600 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/data/hbase/meta/1588230740/ns/3c5a340ed52347e9b0d8238fbbe259ee, entries=2, sequenceid=6, filesize=5.0 K 2024-11-21T11:32:49,601 INFO 
[RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 32ms, sequenceid=6, compaction requested=false 2024-11-21T11:32:49,604 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T11:32:49,605 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T11:32:49,605 INFO [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T11:32:49,605 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732188769569Running coprocessor pre-close hooks at 1732188769569Disabling compacts and flushes for region at 1732188769569Disabling writes for close at 1732188769569Obtaining lock to block concurrent updates at 1732188769570 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732188769570Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732188769570Flushing stores of hbase:meta,,1.1588230740 at 1732188769570Flushing 1588230740/ns: creating writer at 1732188769571 (+1 ms)Flushing 1588230740/ns: appending metadata at 1732188769585 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732188769585Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@47bca529: reopening flushed file at 1732188769595 (+10 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 32ms, sequenceid=6, compaction requested=false at 1732188769601 (+6 ms)Writing region close event to WAL at 1732188769602 (+1 ms)Running coprocessor post-close hooks at 1732188769605 (+3 ms)Closed at 1732188769605 2024-11-21T11:32:49,605 DEBUG [RS_CLOSE_META-regionserver/7b462513bfc2:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T11:32:49,769 INFO [RS:0;7b462513bfc2:39997 {}] regionserver.HRegionServer(976): stopping server 7b462513bfc2,39997,1732188768708; all regions closed. 
2024-11-21T11:32:49,770 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:49,770 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:49,770 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:49,770 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:49,770 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:49,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741834_1010 (size=1152) 2024-11-21T11:32:49,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43833 is added to blk_1073741834_1010 (size=1152) 2024-11-21T11:32:49,775 DEBUG [RS:0;7b462513bfc2:39997 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/oldWALs 2024-11-21T11:32:49,775 INFO [RS:0;7b462513bfc2:39997 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7b462513bfc2%2C39997%2C1732188768708.meta:.meta(num 1732188769453) 2024-11-21T11:32:49,775 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:49,775 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:49,775 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:49,775 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:49,776 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:49,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741833_1009 (size=93) 2024-11-21T11:32:49,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43833 is added to blk_1073741833_1009 (size=93) 2024-11-21T11:32:49,779 DEBUG [RS:0;7b462513bfc2:39997 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/oldWALs 2024-11-21T11:32:49,779 INFO [RS:0;7b462513bfc2:39997 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7b462513bfc2%2C39997%2C1732188768708:(num 1732188769086) 2024-11-21T11:32:49,779 DEBUG [RS:0;7b462513bfc2:39997 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T11:32:49,779 INFO [RS:0;7b462513bfc2:39997 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T11:32:49,779 INFO [RS:0;7b462513bfc2:39997 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T11:32:49,779 INFO [RS:0;7b462513bfc2:39997 {}] hbase.ChoreService(370): Chore service for: regionserver/7b462513bfc2:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-21T11:32:49,780 INFO [RS:0;7b462513bfc2:39997 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T11:32:49,780 INFO [regionserver/7b462513bfc2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-21T11:32:49,780 INFO [RS:0;7b462513bfc2:39997 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39997 2024-11-21T11:32:49,782 INFO [RS:0;7b462513bfc2:39997 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T11:32:49,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39997-0x1013a4cf41c0001, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7b462513bfc2,39997,1732188768708 2024-11-21T11:32:49,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-21T11:32:49,783 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7b462513bfc2,39997,1732188768708] 2024-11-21T11:32:49,785 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7b462513bfc2,39997,1732188768708 already deleted, retry=false 2024-11-21T11:32:49,785 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7b462513bfc2,39997,1732188768708 expired; onlineServers=0 2024-11-21T11:32:49,785 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7b462513bfc2,40089,1732188768666' ***** 2024-11-21T11:32:49,785 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T11:32:49,785 INFO [M:0;7b462513bfc2:40089 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T11:32:49,785 INFO [M:0;7b462513bfc2:40089 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T11:32:49,785 DEBUG [M:0;7b462513bfc2:40089 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T11:32:49,786 DEBUG [M:0;7b462513bfc2:40089 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T11:32:49,786 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-21T11:32:49,786 DEBUG [master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.large.0-1732188768857 {}] cleaner.HFileCleaner(306): Exit Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.large.0-1732188768857,5,FailOnTimeoutGroup] 2024-11-21T11:32:49,786 DEBUG [master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.small.0-1732188768858 {}] cleaner.HFileCleaner(306): Exit Thread[master/7b462513bfc2:0:becomeActiveMaster-HFileCleaner.small.0-1732188768858,5,FailOnTimeoutGroup] 2024-11-21T11:32:49,786 INFO [M:0;7b462513bfc2:40089 {}] hbase.ChoreService(370): Chore service for: master/7b462513bfc2:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T11:32:49,786 INFO [M:0;7b462513bfc2:40089 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T11:32:49,786 DEBUG [M:0;7b462513bfc2:40089 {}] master.HMaster(1795): Stopping service threads 2024-11-21T11:32:49,786 INFO [M:0;7b462513bfc2:40089 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T11:32:49,786 INFO [M:0;7b462513bfc2:40089 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T11:32:49,786 INFO [M:0;7b462513bfc2:40089 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T11:32:49,786 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-21T11:32:49,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-21T11:32:49,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T11:32:49,787 DEBUG [M:0;7b462513bfc2:40089 {}] zookeeper.ZKUtil(347): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-21T11:32:49,787 WARN [M:0;7b462513bfc2:40089 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-21T11:32:49,787 INFO [M:0;7b462513bfc2:40089 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/.lastflushedseqids 2024-11-21T11:32:49,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43833 is added to blk_1073741838_1014 (size=99) 2024-11-21T11:32:49,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741838_1014 (size=99) 2024-11-21T11:32:49,792 INFO [M:0;7b462513bfc2:40089 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T11:32:49,793 INFO [M:0;7b462513bfc2:40089 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T11:32:49,793 DEBUG [M:0;7b462513bfc2:40089 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T11:32:49,793 INFO [M:0;7b462513bfc2:40089 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:32:49,793 DEBUG [M:0;7b462513bfc2:40089 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:32:49,793 DEBUG [M:0;7b462513bfc2:40089 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T11:32:49,793 DEBUG [M:0;7b462513bfc2:40089 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:32:49,793 INFO [M:0;7b462513bfc2:40089 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-21T11:32:49,808 DEBUG [M:0;7b462513bfc2:40089 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/35eca6b295ee4da682e4796573dfd1bc is 82, key is hbase:meta,,1/info:regioninfo/1732188769481/Put/seqid=0 2024-11-21T11:32:49,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741839_1015 (size=5672) 2024-11-21T11:32:49,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43833 is added to blk_1073741839_1015 (size=5672) 2024-11-21T11:32:49,813 INFO [M:0;7b462513bfc2:40089 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/35eca6b295ee4da682e4796573dfd1bc 2024-11-21T11:32:49,830 DEBUG [M:0;7b462513bfc2:40089 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c9ecf3bdf2c74d54a38a24e624d6846b is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732188769499/Put/seqid=0 2024-11-21T11:32:49,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741840_1016 (size=5275) 2024-11-21T11:32:49,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43833 is added to blk_1073741840_1016 (size=5275) 2024-11-21T11:32:49,835 INFO [M:0;7b462513bfc2:40089 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c9ecf3bdf2c74d54a38a24e624d6846b 2024-11-21T11:32:49,853 DEBUG [M:0;7b462513bfc2:40089 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4a7bd7bc580d41bcbb033dc14272f4d2 is 69, key is 7b462513bfc2,39997,1732188768708/rs:state/1732188768943/Put/seqid=0 2024-11-21T11:32:49,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741841_1017 (size=5156) 2024-11-21T11:32:49,858 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43833 is added to blk_1073741841_1017 (size=5156) 2024-11-21T11:32:49,858 INFO [M:0;7b462513bfc2:40089 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4a7bd7bc580d41bcbb033dc14272f4d2 2024-11-21T11:32:49,875 DEBUG [M:0;7b462513bfc2:40089 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0f9f000bc90e48dca0e4d8f2d77aaef7 is 52, key is load_balancer_on/state:d/1732188769532/Put/seqid=0 2024-11-21T11:32:49,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741842_1018 (size=5056) 2024-11-21T11:32:49,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43833 is added to blk_1073741842_1018 (size=5056) 2024-11-21T11:32:49,880 INFO [M:0;7b462513bfc2:40089 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0f9f000bc90e48dca0e4d8f2d77aaef7 2024-11-21T11:32:49,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39997-0x1013a4cf41c0001, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:32:49,883 INFO [RS:0;7b462513bfc2:39997 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T11:32:49,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39997-0x1013a4cf41c0001, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:32:49,883 INFO [RS:0;7b462513bfc2:39997 {}] regionserver.HRegionServer(1031): Exiting; stopping=7b462513bfc2,39997,1732188768708; zookeeper connection closed. 
2024-11-21T11:32:49,883 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@dcbc3e1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@dcbc3e1 2024-11-21T11:32:49,884 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T11:32:49,884 DEBUG [M:0;7b462513bfc2:40089 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/35eca6b295ee4da682e4796573dfd1bc as hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/35eca6b295ee4da682e4796573dfd1bc 2024-11-21T11:32:49,888 INFO [M:0;7b462513bfc2:40089 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/35eca6b295ee4da682e4796573dfd1bc, entries=8, sequenceid=29, filesize=5.5 K 2024-11-21T11:32:49,889 DEBUG [M:0;7b462513bfc2:40089 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c9ecf3bdf2c74d54a38a24e624d6846b as hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c9ecf3bdf2c74d54a38a24e624d6846b 2024-11-21T11:32:49,893 INFO [M:0;7b462513bfc2:40089 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c9ecf3bdf2c74d54a38a24e624d6846b, entries=3, sequenceid=29, filesize=5.2 K 2024-11-21T11:32:49,894 DEBUG [M:0;7b462513bfc2:40089 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4a7bd7bc580d41bcbb033dc14272f4d2 as hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4a7bd7bc580d41bcbb033dc14272f4d2 2024-11-21T11:32:49,898 INFO [M:0;7b462513bfc2:40089 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4a7bd7bc580d41bcbb033dc14272f4d2, entries=1, sequenceid=29, filesize=5.0 K 2024-11-21T11:32:49,898 DEBUG [M:0;7b462513bfc2:40089 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0f9f000bc90e48dca0e4d8f2d77aaef7 as hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0f9f000bc90e48dca0e4d8f2d77aaef7 2024-11-21T11:32:49,902 INFO [M:0;7b462513bfc2:40089 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43373/user/jenkins/test-data/614e7520-62b6-b01d-2574-b7e7c9baeb17/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0f9f000bc90e48dca0e4d8f2d77aaef7, entries=1, sequenceid=29, filesize=4.9 K 2024-11-21T11:32:49,903 INFO [M:0;7b462513bfc2:40089 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 110ms, sequenceid=29, compaction requested=false 2024-11-21T11:32:49,905 INFO [M:0;7b462513bfc2:40089 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T11:32:49,905 DEBUG [M:0;7b462513bfc2:40089 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732188769793Disabling compacts and flushes for region at 1732188769793Disabling writes for close at 1732188769793Obtaining lock to block concurrent updates at 1732188769793Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732188769793Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732188769793Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732188769794 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732188769794Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732188769808 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732188769808Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732188769816 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732188769830 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732188769830Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732188769839 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732188769852 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732188769853 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732188769862 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732188769875 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732188769875Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@73f4116e: reopening flushed file at 1732188769883 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4878270a: reopening flushed file at 1732188769888 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c9c76ca: reopening flushed file at 1732188769893 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@262eb9f7: reopening flushed file at 1732188769898 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 110ms, sequenceid=29, compaction requested=false at 1732188769903 (+5 ms)Writing region close event to WAL at 1732188769904 (+1 ms)Closed at 1732188769904 2024-11-21T11:32:49,905 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:49,905 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:49,905 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:49,905 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-21T11:32:49,905 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-21T11:32:49,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32867 is added to blk_1073741830_1006 (size=10311) 2024-11-21T11:32:49,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43833 is added to blk_1073741830_1006 (size=10311) 2024-11-21T11:32:49,908 INFO [M:0;7b462513bfc2:40089 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-21T11:32:49,908 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T11:32:49,908 INFO [M:0;7b462513bfc2:40089 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40089 2024-11-21T11:32:49,909 INFO [M:0;7b462513bfc2:40089 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T11:32:50,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:32:50,010 INFO [M:0;7b462513bfc2:40089 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T11:32:50,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40089-0x1013a4cf41c0000, quorum=127.0.0.1:52079, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T11:32:50,013 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@87b2e2b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:32:50,013 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@380ffe40{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:32:50,013 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:32:50,013 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7748f5df{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:32:50,013 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@814e400{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/hadoop.log.dir/,STOPPED} 2024-11-21T11:32:50,014 WARN [BP-1095317787-172.17.0.2-1732188768016 heartbeating to localhost/127.0.0.1:43373 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T11:32:50,014 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T11:32:50,014 WARN [BP-1095317787-172.17.0.2-1732188768016 heartbeating to localhost/127.0.0.1:43373 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1095317787-172.17.0.2-1732188768016 (Datanode Uuid 3a485a92-c4b4-4957-8ae0-8a93aa42cdde) service to localhost/127.0.0.1:43373 2024-11-21T11:32:50,014 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T11:32:50,015 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/cluster_a45dd359-a9ed-264a-f38d-1a947582d854/data/data3/current/BP-1095317787-172.17.0.2-1732188768016 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:32:50,015 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/cluster_a45dd359-a9ed-264a-f38d-1a947582d854/data/data4/current/BP-1095317787-172.17.0.2-1732188768016 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:32:50,015 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T11:32:50,017 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@79ca80d8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T11:32:50,017 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@150dab73{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:32:50,017 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:32:50,017 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e32ebb8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:32:50,017 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c8a9439{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/hadoop.log.dir/,STOPPED} 2024-11-21T11:32:50,019 WARN [BP-1095317787-172.17.0.2-1732188768016 heartbeating to localhost/127.0.0.1:43373 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T11:32:50,019 WARN [BP-1095317787-172.17.0.2-1732188768016 heartbeating to localhost/127.0.0.1:43373 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1095317787-172.17.0.2-1732188768016 (Datanode Uuid 82c04bef-0e61-4c22-8d12-bba6c6ae16cc) service to localhost/127.0.0.1:43373 2024-11-21T11:32:50,019 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T11:32:50,019 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T11:32:50,019 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/cluster_a45dd359-a9ed-264a-f38d-1a947582d854/data/data1/current/BP-1095317787-172.17.0.2-1732188768016 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:32:50,019 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/cluster_a45dd359-a9ed-264a-f38d-1a947582d854/data/data2/current/BP-1095317787-172.17.0.2-1732188768016 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T11:32:50,020 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T11:32:50,025 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@54644e01{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T11:32:50,026 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@134d2ab8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T11:32:50,026 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T11:32:50,026 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@512e80eb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T11:32:50,026 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32d01bcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3dd6b9da-cf89-b8f6-e967-544c0d517e41/hadoop.log.dir/,STOPPED} 2024-11-21T11:32:50,032 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-21T11:32:50,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-21T11:32:50,054 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=271 (was 231) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:43373 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:43373 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:43373 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:43373 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43373 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43373 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43373 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43373 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: globalEventExecutor-1-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=540 (was 515) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=61 (was 58) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4901 (was 4907)